ARMAsmParser.cpp revision 4661d4cac3ba7f480a91d0ccd35fb2d22d9692d3
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - The target-specific assembly parser for ARM and Thumb.
/// It splits mnemonics (predication suffix, 's' flag, IT mask), parses
/// operands into ARMOperand objects, and hands the result to the
/// tablegen'erated matcher included via ARMGenAsmMatcher.inc below.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;  // Subtarget feature bits (ARM/Thumb mode, arch level).
  MCAsmParser &Parser;   // Generic parser we delegate lexing/diagnostics to.

  /// Parser-side state for the Thumb IT (If-Then) block currently being
  /// assembled, if any. Inactive when CurPosition == ~0U.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block.  It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
                              // NOTE(review): forwardITPosition() below
                              // advances until CurPosition == 5 - TZ(mask),
                              // which for TZ == 0 makes position 4 reachable;
                              // the "[0,3]" range above may be stale --
                              // confirm against the IT-parsing code.
  } ITState;

  /// True while we are inside an active IT block (CurPosition != ~0U).
  bool inITBlock() { return ITState.CurPosition != ~0U;}

  /// Move to the next slot of the active IT block, if any. The number of
  /// slots is derived from the mask's trailing zeroes; once the final slot
  /// is consumed the block is deactivated by setting CurPosition back
  /// to ~0U.
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics are forwarded straight to the generic MCAsmParser, which
  // owns the SourceMgr and renders the location info.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Low-level register/operand parsing helpers (bodies later in the file).
  // tryParseRegister returns a register number or -1 on failure; the bool
  // parsers follow the usual convention of returning true on error.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);

  // Target-specific assembler directives (.word, .thumb, .thumb_func,
  // .code, .syntax), dispatched from ParseDirective below.
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  /// splitMnemonic - Decompose a full mnemonic into the bare mnemonic plus
  /// any predication code, carry-set ('s') flag, processor IMod, and IT
  /// mask suffix it carries.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  /// getMnemonicAcceptInfo - Report whether this mnemonic can take a
  /// carry-set flag and/or a predication code.
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature predicates, all answered from STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  /// SwitchMode - Toggle the ModeThumb feature bit (ARM <-> Thumb) and
  /// recompute the matcher's available-feature mask accordingly.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked by the generated matcher for operand
  // classes marked with a ParserMethod in the .td files.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  // PKH lsl amounts are 0-31; asr amounts are 1-32. Both share parsePKHImm.
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods
  // These rewrite parsed operand lists into the operand order the MCInst
  // encoding expects (mostly for writeback load/store forms).
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  /// validateInstruction - Post-match semantic checks (returns true on
  /// error); processInstruction applies post-match rewrites to the MCInst.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  /// shouldOmitCCOutOperand - Decide whether the optional cc_out operand
  /// should be dropped for this mnemonic/operand combination.
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match-failure codes returned by
  // checkTargetMatchPredicate, starting after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
246class ARMOperand : public MCParsedAsmOperand { 247 enum KindTy { 248 k_CondCode, 249 k_CCOut, 250 k_ITCondMask, 251 k_CoprocNum, 252 k_CoprocReg, 253 k_CoprocOption, 254 k_Immediate, 255 k_FPImmediate, 256 k_MemBarrierOpt, 257 k_Memory, 258 k_PostIndexRegister, 259 k_MSRMask, 260 k_ProcIFlags, 261 k_VectorIndex, 262 k_Register, 263 k_RegisterList, 264 k_DPRRegisterList, 265 k_SPRRegisterList, 266 k_VectorList, 267 k_ShiftedRegister, 268 k_ShiftedImmediate, 269 k_ShifterImmediate, 270 k_RotateImmediate, 271 k_BitfieldDescriptor, 272 k_Token 273 } Kind; 274 275 SMLoc StartLoc, EndLoc; 276 SmallVector<unsigned, 8> Registers; 277 278 union { 279 struct { 280 ARMCC::CondCodes Val; 281 } CC; 282 283 struct { 284 unsigned Val; 285 } Cop; 286 287 struct { 288 unsigned Val; 289 } CoprocOption; 290 291 struct { 292 unsigned Mask:4; 293 } ITMask; 294 295 struct { 296 ARM_MB::MemBOpt Val; 297 } MBOpt; 298 299 struct { 300 ARM_PROC::IFlags Val; 301 } IFlags; 302 303 struct { 304 unsigned Val; 305 } MMask; 306 307 struct { 308 const char *Data; 309 unsigned Length; 310 } Tok; 311 312 struct { 313 unsigned RegNum; 314 } Reg; 315 316 // A vector register list is a sequential list of 1 to 4 registers. 317 struct { 318 unsigned RegNum; 319 unsigned Count; 320 } VectorList; 321 322 struct { 323 unsigned Val; 324 } VectorIndex; 325 326 struct { 327 const MCExpr *Val; 328 } Imm; 329 330 struct { 331 unsigned Val; // encoded 8-bit representation 332 } FPImm; 333 334 /// Combined record for all forms of ARM address expressions. 335 struct { 336 unsigned BaseRegNum; 337 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 338 // was specified. 339 const MCConstantExpr *OffsetImm; // Offset immediate value 340 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 341 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 342 unsigned ShiftImm; // shift for OffsetReg. 
343 unsigned Alignment; // 0 = no alignment specified 344 // n = alignment in bytes (8, 16, or 32) 345 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 346 } Memory; 347 348 struct { 349 unsigned RegNum; 350 bool isAdd; 351 ARM_AM::ShiftOpc ShiftTy; 352 unsigned ShiftImm; 353 } PostIdxReg; 354 355 struct { 356 bool isASR; 357 unsigned Imm; 358 } ShifterImm; 359 struct { 360 ARM_AM::ShiftOpc ShiftTy; 361 unsigned SrcReg; 362 unsigned ShiftReg; 363 unsigned ShiftImm; 364 } RegShiftedReg; 365 struct { 366 ARM_AM::ShiftOpc ShiftTy; 367 unsigned SrcReg; 368 unsigned ShiftImm; 369 } RegShiftedImm; 370 struct { 371 unsigned Imm; 372 } RotImm; 373 struct { 374 unsigned LSB; 375 unsigned Width; 376 } Bitfield; 377 }; 378 379 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 380public: 381 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 382 Kind = o.Kind; 383 StartLoc = o.StartLoc; 384 EndLoc = o.EndLoc; 385 switch (Kind) { 386 case k_CondCode: 387 CC = o.CC; 388 break; 389 case k_ITCondMask: 390 ITMask = o.ITMask; 391 break; 392 case k_Token: 393 Tok = o.Tok; 394 break; 395 case k_CCOut: 396 case k_Register: 397 Reg = o.Reg; 398 break; 399 case k_RegisterList: 400 case k_DPRRegisterList: 401 case k_SPRRegisterList: 402 Registers = o.Registers; 403 break; 404 case k_VectorList: 405 VectorList = o.VectorList; 406 break; 407 case k_CoprocNum: 408 case k_CoprocReg: 409 Cop = o.Cop; 410 break; 411 case k_CoprocOption: 412 CoprocOption = o.CoprocOption; 413 break; 414 case k_Immediate: 415 Imm = o.Imm; 416 break; 417 case k_FPImmediate: 418 FPImm = o.FPImm; 419 break; 420 case k_MemBarrierOpt: 421 MBOpt = o.MBOpt; 422 break; 423 case k_Memory: 424 Memory = o.Memory; 425 break; 426 case k_PostIndexRegister: 427 PostIdxReg = o.PostIdxReg; 428 break; 429 case k_MSRMask: 430 MMask = o.MMask; 431 break; 432 case k_ProcIFlags: 433 IFlags = o.IFlags; 434 break; 435 case k_ShifterImmediate: 436 ShifterImm = o.ShifterImm; 437 break; 438 case k_ShiftedRegister: 439 
RegShiftedReg = o.RegShiftedReg; 440 break; 441 case k_ShiftedImmediate: 442 RegShiftedImm = o.RegShiftedImm; 443 break; 444 case k_RotateImmediate: 445 RotImm = o.RotImm; 446 break; 447 case k_BitfieldDescriptor: 448 Bitfield = o.Bitfield; 449 break; 450 case k_VectorIndex: 451 VectorIndex = o.VectorIndex; 452 break; 453 } 454 } 455 456 /// getStartLoc - Get the location of the first token of this operand. 457 SMLoc getStartLoc() const { return StartLoc; } 458 /// getEndLoc - Get the location of the last token of this operand. 459 SMLoc getEndLoc() const { return EndLoc; } 460 461 ARMCC::CondCodes getCondCode() const { 462 assert(Kind == k_CondCode && "Invalid access!"); 463 return CC.Val; 464 } 465 466 unsigned getCoproc() const { 467 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 468 return Cop.Val; 469 } 470 471 StringRef getToken() const { 472 assert(Kind == k_Token && "Invalid access!"); 473 return StringRef(Tok.Data, Tok.Length); 474 } 475 476 unsigned getReg() const { 477 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 478 return Reg.RegNum; 479 } 480 481 const SmallVectorImpl<unsigned> &getRegList() const { 482 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 483 Kind == k_SPRRegisterList) && "Invalid access!"); 484 return Registers; 485 } 486 487 const MCExpr *getImm() const { 488 assert(Kind == k_Immediate && "Invalid access!"); 489 return Imm.Val; 490 } 491 492 unsigned getFPImm() const { 493 assert(Kind == k_FPImmediate && "Invalid access!"); 494 return FPImm.Val; 495 } 496 497 unsigned getVectorIndex() const { 498 assert(Kind == k_VectorIndex && "Invalid access!"); 499 return VectorIndex.Val; 500 } 501 502 ARM_MB::MemBOpt getMemBarrierOpt() const { 503 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 504 return MBOpt.Val; 505 } 506 507 ARM_PROC::IFlags getProcIFlags() const { 508 assert(Kind == k_ProcIFlags && "Invalid access!"); 509 return IFlags.Val; 510 } 511 512 unsigned 
getMSRMask() const { 513 assert(Kind == k_MSRMask && "Invalid access!"); 514 return MMask.Val; 515 } 516 517 bool isCoprocNum() const { return Kind == k_CoprocNum; } 518 bool isCoprocReg() const { return Kind == k_CoprocReg; } 519 bool isCoprocOption() const { return Kind == k_CoprocOption; } 520 bool isCondCode() const { return Kind == k_CondCode; } 521 bool isCCOut() const { return Kind == k_CCOut; } 522 bool isITMask() const { return Kind == k_ITCondMask; } 523 bool isITCondCode() const { return Kind == k_CondCode; } 524 bool isImm() const { return Kind == k_Immediate; } 525 bool isFPImm() const { return Kind == k_FPImmediate; } 526 bool isImm8s4() const { 527 if (Kind != k_Immediate) 528 return false; 529 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 530 if (!CE) return false; 531 int64_t Value = CE->getValue(); 532 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 533 } 534 bool isImm0_1020s4() const { 535 if (Kind != k_Immediate) 536 return false; 537 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 538 if (!CE) return false; 539 int64_t Value = CE->getValue(); 540 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 541 } 542 bool isImm0_508s4() const { 543 if (Kind != k_Immediate) 544 return false; 545 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 546 if (!CE) return false; 547 int64_t Value = CE->getValue(); 548 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 549 } 550 bool isImm0_255() const { 551 if (Kind != k_Immediate) 552 return false; 553 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 554 if (!CE) return false; 555 int64_t Value = CE->getValue(); 556 return Value >= 0 && Value < 256; 557 } 558 bool isImm0_7() const { 559 if (Kind != k_Immediate) 560 return false; 561 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 562 if (!CE) return false; 563 int64_t Value = CE->getValue(); 564 return Value >= 0 && Value < 8; 565 } 566 bool isImm0_15() const { 
567 if (Kind != k_Immediate) 568 return false; 569 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 570 if (!CE) return false; 571 int64_t Value = CE->getValue(); 572 return Value >= 0 && Value < 16; 573 } 574 bool isImm0_31() const { 575 if (Kind != k_Immediate) 576 return false; 577 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 578 if (!CE) return false; 579 int64_t Value = CE->getValue(); 580 return Value >= 0 && Value < 32; 581 } 582 bool isImm1_16() const { 583 if (Kind != k_Immediate) 584 return false; 585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 586 if (!CE) return false; 587 int64_t Value = CE->getValue(); 588 return Value > 0 && Value < 17; 589 } 590 bool isImm1_32() const { 591 if (Kind != k_Immediate) 592 return false; 593 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 594 if (!CE) return false; 595 int64_t Value = CE->getValue(); 596 return Value > 0 && Value < 33; 597 } 598 bool isImm0_65535() const { 599 if (Kind != k_Immediate) 600 return false; 601 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 602 if (!CE) return false; 603 int64_t Value = CE->getValue(); 604 return Value >= 0 && Value < 65536; 605 } 606 bool isImm0_65535Expr() const { 607 if (Kind != k_Immediate) 608 return false; 609 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 610 // If it's not a constant expression, it'll generate a fixup and be 611 // handled later. 
612 if (!CE) return true; 613 int64_t Value = CE->getValue(); 614 return Value >= 0 && Value < 65536; 615 } 616 bool isImm24bit() const { 617 if (Kind != k_Immediate) 618 return false; 619 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 620 if (!CE) return false; 621 int64_t Value = CE->getValue(); 622 return Value >= 0 && Value <= 0xffffff; 623 } 624 bool isImmThumbSR() const { 625 if (Kind != k_Immediate) 626 return false; 627 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 628 if (!CE) return false; 629 int64_t Value = CE->getValue(); 630 return Value > 0 && Value < 33; 631 } 632 bool isPKHLSLImm() const { 633 if (Kind != k_Immediate) 634 return false; 635 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 636 if (!CE) return false; 637 int64_t Value = CE->getValue(); 638 return Value >= 0 && Value < 32; 639 } 640 bool isPKHASRImm() const { 641 if (Kind != k_Immediate) 642 return false; 643 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 644 if (!CE) return false; 645 int64_t Value = CE->getValue(); 646 return Value > 0 && Value <= 32; 647 } 648 bool isARMSOImm() const { 649 if (Kind != k_Immediate) 650 return false; 651 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 652 if (!CE) return false; 653 int64_t Value = CE->getValue(); 654 return ARM_AM::getSOImmVal(Value) != -1; 655 } 656 bool isT2SOImm() const { 657 if (Kind != k_Immediate) 658 return false; 659 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 660 if (!CE) return false; 661 int64_t Value = CE->getValue(); 662 return ARM_AM::getT2SOImmVal(Value) != -1; 663 } 664 bool isSetEndImm() const { 665 if (Kind != k_Immediate) 666 return false; 667 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 668 if (!CE) return false; 669 int64_t Value = CE->getValue(); 670 return Value == 1 || Value == 0; 671 } 672 bool isReg() const { return Kind == k_Register; } 673 bool isRegList() const { return Kind == k_RegisterList; 
} 674 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 675 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 676 bool isToken() const { return Kind == k_Token; } 677 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 678 bool isMemory() const { return Kind == k_Memory; } 679 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 680 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 681 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 682 bool isRotImm() const { return Kind == k_RotateImmediate; } 683 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 684 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 685 bool isPostIdxReg() const { 686 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift; 687 } 688 bool isMemNoOffset(bool alignOK = false) const { 689 if (!isMemory()) 690 return false; 691 // No offset of any kind. 692 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 693 (alignOK || Memory.Alignment == 0); 694 } 695 bool isAlignedMemory() const { 696 return isMemNoOffset(true); 697 } 698 bool isAddrMode2() const { 699 if (!isMemory() || Memory.Alignment != 0) return false; 700 // Check for register offset. 701 if (Memory.OffsetRegNum) return true; 702 // Immediate offset in range [-4095, 4095]. 703 if (!Memory.OffsetImm) return true; 704 int64_t Val = Memory.OffsetImm->getValue(); 705 return Val > -4096 && Val < 4096; 706 } 707 bool isAM2OffsetImm() const { 708 if (Kind != k_Immediate) 709 return false; 710 // Immediate offset in range [-4095, 4095]. 711 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 712 if (!CE) return false; 713 int64_t Val = CE->getValue(); 714 return Val > -4096 && Val < 4096; 715 } 716 bool isAddrMode3() const { 717 if (!isMemory() || Memory.Alignment != 0) return false; 718 // No shifts are legal for AM3. 
719 if (Memory.ShiftType != ARM_AM::no_shift) return false; 720 // Check for register offset. 721 if (Memory.OffsetRegNum) return true; 722 // Immediate offset in range [-255, 255]. 723 if (!Memory.OffsetImm) return true; 724 int64_t Val = Memory.OffsetImm->getValue(); 725 return Val > -256 && Val < 256; 726 } 727 bool isAM3Offset() const { 728 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 729 return false; 730 if (Kind == k_PostIndexRegister) 731 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 732 // Immediate offset in range [-255, 255]. 733 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 734 if (!CE) return false; 735 int64_t Val = CE->getValue(); 736 // Special case, #-0 is INT32_MIN. 737 return (Val > -256 && Val < 256) || Val == INT32_MIN; 738 } 739 bool isAddrMode5() const { 740 if (!isMemory() || Memory.Alignment != 0) return false; 741 // Check for register offset. 742 if (Memory.OffsetRegNum) return false; 743 // Immediate offset in range [-1020, 1020] and a multiple of 4. 744 if (!Memory.OffsetImm) return true; 745 int64_t Val = Memory.OffsetImm->getValue(); 746 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 747 Val == INT32_MIN; 748 } 749 bool isMemTBB() const { 750 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 751 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 752 return false; 753 return true; 754 } 755 bool isMemTBH() const { 756 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 757 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 758 Memory.Alignment != 0 ) 759 return false; 760 return true; 761 } 762 bool isMemRegOffset() const { 763 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 764 return false; 765 return true; 766 } 767 bool isT2MemRegOffset() const { 768 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 769 Memory.Alignment != 0) 770 return false; 771 // Only lsl #{0, 1, 2, 3} allowed. 
772 if (Memory.ShiftType == ARM_AM::no_shift) 773 return true; 774 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 775 return false; 776 return true; 777 } 778 bool isMemThumbRR() const { 779 // Thumb reg+reg addressing is simple. Just two registers, a base and 780 // an offset. No shifts, negations or any other complicating factors. 781 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 782 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 783 return false; 784 return isARMLowRegister(Memory.BaseRegNum) && 785 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 786 } 787 bool isMemThumbRIs4() const { 788 if (!isMemory() || Memory.OffsetRegNum != 0 || 789 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 790 return false; 791 // Immediate offset, multiple of 4 in range [0, 124]. 792 if (!Memory.OffsetImm) return true; 793 int64_t Val = Memory.OffsetImm->getValue(); 794 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 795 } 796 bool isMemThumbRIs2() const { 797 if (!isMemory() || Memory.OffsetRegNum != 0 || 798 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 799 return false; 800 // Immediate offset, multiple of 4 in range [0, 62]. 801 if (!Memory.OffsetImm) return true; 802 int64_t Val = Memory.OffsetImm->getValue(); 803 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 804 } 805 bool isMemThumbRIs1() const { 806 if (!isMemory() || Memory.OffsetRegNum != 0 || 807 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 808 return false; 809 // Immediate offset in range [0, 31]. 810 if (!Memory.OffsetImm) return true; 811 int64_t Val = Memory.OffsetImm->getValue(); 812 return Val >= 0 && Val <= 31; 813 } 814 bool isMemThumbSPI() const { 815 if (!isMemory() || Memory.OffsetRegNum != 0 || 816 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 817 return false; 818 // Immediate offset, multiple of 4 in range [0, 1020]. 
819 if (!Memory.OffsetImm) return true; 820 int64_t Val = Memory.OffsetImm->getValue(); 821 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 822 } 823 bool isMemImm8s4Offset() const { 824 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 825 return false; 826 // Immediate offset a multiple of 4 in range [-1020, 1020]. 827 if (!Memory.OffsetImm) return true; 828 int64_t Val = Memory.OffsetImm->getValue(); 829 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 830 } 831 bool isMemImm0_1020s4Offset() const { 832 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 833 return false; 834 // Immediate offset a multiple of 4 in range [0, 1020]. 835 if (!Memory.OffsetImm) return true; 836 int64_t Val = Memory.OffsetImm->getValue(); 837 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 838 } 839 bool isMemImm8Offset() const { 840 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 841 return false; 842 // Immediate offset in range [-255, 255]. 843 if (!Memory.OffsetImm) return true; 844 int64_t Val = Memory.OffsetImm->getValue(); 845 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 846 } 847 bool isMemPosImm8Offset() const { 848 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 849 return false; 850 // Immediate offset in range [0, 255]. 851 if (!Memory.OffsetImm) return true; 852 int64_t Val = Memory.OffsetImm->getValue(); 853 return Val >= 0 && Val < 256; 854 } 855 bool isMemNegImm8Offset() const { 856 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 857 return false; 858 // Immediate offset in range [-255, -1]. 859 if (!Memory.OffsetImm) return true; 860 int64_t Val = Memory.OffsetImm->getValue(); 861 return Val > -256 && Val < 0; 862 } 863 bool isMemUImm12Offset() const { 864 // If we have an immediate that's not a constant, treat it as a label 865 // reference needing a fixup. If it is a constant, it's something else 866 // and we reject it. 
867 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 868 return true; 869 870 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 871 return false; 872 // Immediate offset in range [0, 4095]. 873 if (!Memory.OffsetImm) return true; 874 int64_t Val = Memory.OffsetImm->getValue(); 875 return (Val >= 0 && Val < 4096); 876 } 877 bool isMemImm12Offset() const { 878 // If we have an immediate that's not a constant, treat it as a label 879 // reference needing a fixup. If it is a constant, it's something else 880 // and we reject it. 881 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 882 return true; 883 884 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 885 return false; 886 // Immediate offset in range [-4095, 4095]. 887 if (!Memory.OffsetImm) return true; 888 int64_t Val = Memory.OffsetImm->getValue(); 889 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 890 } 891 bool isPostIdxImm8() const { 892 if (Kind != k_Immediate) 893 return false; 894 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 895 if (!CE) return false; 896 int64_t Val = CE->getValue(); 897 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 898 } 899 bool isPostIdxImm8s4() const { 900 if (Kind != k_Immediate) 901 return false; 902 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 903 if (!CE) return false; 904 int64_t Val = CE->getValue(); 905 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 906 (Val == INT32_MIN); 907 } 908 909 bool isMSRMask() const { return Kind == k_MSRMask; } 910 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 911 912 // NEON operands. 
913 bool isVecListOneD() const { 914 if (Kind != k_VectorList) return false; 915 return VectorList.Count == 1; 916 } 917 918 bool isVecListTwoD() const { 919 if (Kind != k_VectorList) return false; 920 return VectorList.Count == 2; 921 } 922 923 bool isVecListThreeD() const { 924 if (Kind != k_VectorList) return false; 925 return VectorList.Count == 3; 926 } 927 928 bool isVecListFourD() const { 929 if (Kind != k_VectorList) return false; 930 return VectorList.Count == 4; 931 } 932 933 bool isVecListTwoQ() const { 934 if (Kind != k_VectorList) return false; 935 //FIXME: We haven't taught the parser to handle by-two register lists 936 // yet, so don't pretend to know one. 937 return VectorList.Count == 2 && false; 938 } 939 940 bool isVectorIndex8() const { 941 if (Kind != k_VectorIndex) return false; 942 return VectorIndex.Val < 8; 943 } 944 bool isVectorIndex16() const { 945 if (Kind != k_VectorIndex) return false; 946 return VectorIndex.Val < 4; 947 } 948 bool isVectorIndex32() const { 949 if (Kind != k_VectorIndex) return false; 950 return VectorIndex.Val < 2; 951 } 952 953 bool isNEONi8splat() const { 954 if (Kind != k_Immediate) 955 return false; 956 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 957 // Must be a constant. 958 if (!CE) return false; 959 int64_t Value = CE->getValue(); 960 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 961 // value. 962 return Value >= 0 && Value < 256; 963 } 964 965 bool isNEONi16splat() const { 966 if (Kind != k_Immediate) 967 return false; 968 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 969 // Must be a constant. 
970 if (!CE) return false; 971 int64_t Value = CE->getValue(); 972 // i16 value in the range [0,255] or [0x0100, 0xff00] 973 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 974 } 975 976 bool isNEONi32splat() const { 977 if (Kind != k_Immediate) 978 return false; 979 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 980 // Must be a constant. 981 if (!CE) return false; 982 int64_t Value = CE->getValue(); 983 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 984 return (Value >= 0 && Value < 256) || 985 (Value >= 0x0100 && Value <= 0xff00) || 986 (Value >= 0x010000 && Value <= 0xff0000) || 987 (Value >= 0x01000000 && Value <= 0xff000000); 988 } 989 990 bool isNEONi32vmov() const { 991 if (Kind != k_Immediate) 992 return false; 993 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 994 // Must be a constant. 995 if (!CE) return false; 996 int64_t Value = CE->getValue(); 997 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 998 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 999 return (Value >= 0 && Value < 256) || 1000 (Value >= 0x0100 && Value <= 0xff00) || 1001 (Value >= 0x010000 && Value <= 0xff0000) || 1002 (Value >= 0x01000000 && Value <= 0xff000000) || 1003 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1004 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1005 } 1006 1007 bool isNEONi64splat() const { 1008 if (Kind != k_Immediate) 1009 return false; 1010 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1011 // Must be a constant. 1012 if (!CE) return false; 1013 uint64_t Value = CE->getValue(); 1014 // i64 value with each byte being either 0 or 0xff. 1015 for (unsigned i = 0; i < 8; ++i) 1016 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1017 return true; 1018 } 1019 1020 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1021 // Add as immediates when possible. Null MCExpr = 0. 
// (tail of addExpr(); its signature is immediately above this chunk)
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  // Predicate is two MCOperands: the ARMCC value plus the register read for
  // predication — CPSR, or reg0 when the instruction is unconditional (AL).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // so_reg (register-shifted register): Rm, Rs, and a combined
  // shift-opcode+amount immediate built by getSORegOpc.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 &&
           "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Bit 5 distinguishes ASR from LSL; the low bits hold the amount.
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
// (tail of addRotImmOperands(); rotation is stored encoded as value>>3)
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
// (tail of addImm1_32Operands(); value is stored as immediate-1)
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm24bitOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  // Addressing mode 2: base reg, offset reg (or reg0), and a combined
  // add/sub + shift/amount immediate built by getAM2Opc.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ?
// (continuation of addAddrMode2Operands(): register-offset encoding)
                              ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Addressing mode 3: same shape as AM2 but no shifted register offset.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed AM3 offset: either a register (k_PostIndexRegister) or a
  // constant immediate.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ?
// (continuation of addMemImm0_1020s4OffsetOperands(): offset scaled by 4)
      Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ?
// (continuation of addMemThumbRIs4Operands(): offset scaled by 4)
      (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed imm8: magnitude in the low bits, add/sub flag in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is the parser's sentinel for "#-0".
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOneDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListThreeDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
// (continuation of addVecListThreeDOperands(): trailing registers are
// implied by the opcode)
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListFourDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoQOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // NOTE(review): relies on the matcher (isNEONi16splat) only letting
    // through values whose set bits fit a single byte; otherwise the
    // shift below drops the low byte.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // NOTE(review): same single-byte precondition as addNEONi16splatOperands,
    // here guaranteed by isNEONi32splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // The 0xc00/0xd00 cmodes cover the VMOV-only 00Xff / 0Xffff forms.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
// (tail of addNEONi64splatOperands(): build the per-byte mask immediate)
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    // Each byte of the value is either 0 or 0xff; bit i of the encoded
    // immediate records which, for byte i.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory methods. Each allocates a new ARMOperand of the given kind and
  // records the source range for diagnostics.
  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // NOTE(review): the token stores a pointer into Str's backing storage; the
  // caller must keep that storage alive for the operand's lifetime.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    // The list kind is derived from the register class of the first entry;
    // the registers are stored sorted by number.
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    // NOTE(review): Ctx is unused in this body — confirm whether it is kept
    // only for interface symmetry with other Create* helpers.
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    // NOTE(review): Ctx is unused here as well; see CreateVectorIndex.
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

/// Dump a human-readable description of the operand to OS, for debugging.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Table indexed directly by the 4-bit IT mask encoding.
    static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)",
      "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)",
      "(tee)", "(eee)" };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset/shift/alignment are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): this case emits a closing ">" but no opening "<",
    // unlike every other case -- presumably an oversight; confirm before
    // relying on the exact dump format.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print set flags from highest bit (a) to lowest (f).
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm))
       << ", " << RegShiftedReg.ShiftReg << ", "
       << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm)
       << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm))
       << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm)
       << ">";
    break;
  case k_RotateImmediate:
    // RotImm.Imm is stored divided by 8; scale back up for display.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// Target-independent register-parsing hook: parse a register and return
/// false on success, true on failure.
/// NOTE(review): StartLoc/EndLoc are never assigned here -- presumably
/// callers only use RegNo; confirm before relying on the locations.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Accept the architectural aliases the tablegen matcher doesn't know.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns bool 'true', so this returns 1
    // ("recoverable") even though the shift operator token has already
    // been consumed -- confirm callers treat this acceptably.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted register becomes so_reg_reg; an immediate shift (and
  // rrx, despite having ShiftReg set above) becomes so_reg_imm.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::
tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int RegNo = tryParseRegister();
  if (RegNo == -1)
    return true;

  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));

  // A trailing '!' marks writeback; it's kept as a token operand so the
  // matcher can select the writeback instruction form.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  // NOTE(review): the three MatchOperand_ParseFail returns below come from
  // the OperandMatchResultTy enum but this function returns bool -- they
  // convert to a nonzero (failure) value, which happens to work, but the
  // mixed return types look accidental; confirm intent.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }

  return false;
}

/// MatchCoprocessorOperandName - Try to parse an coprocessor related
/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
/// "c5", ...
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
  // Use the same layout as the tablegen'erated register name matcher. Ugly,
  // but efficient.
  switch (Name.size()) {
  default: break;
  case 2:
    // Single digit: "p0".."p9" / "c0".."c9".
    if (Name[0] != CoprocOp)
      return -1;
    switch (Name[1]) {
    default:  return -1;
    case '0': return 0;
    case '1': return 1;
    case '2': return 2;
    case '3': return 3;
    case '4': return 4;
    case '5': return 5;
    case '6': return 6;
    case '7': return 7;
    case '8': return 8;
    case '9': return 9;
    }
    break;
  case 3:
    // Two digits: "p10".."p15" / "c10".."c15".
    if (Name[0] != CoprocOp || Name[1] != '1')
      return -1;
    switch (Name[2]) {
    default:  return -1;
    case '0': return 10;
    case '1': return 11;
    case '2': return 12;
    case '3': return 13;
    case '4': return 14;
    case '5': return 15;
    }
    break;
  }

  return -1;
}

/// parseITCondCode - Try to parse a condition code for an IT instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  unsigned CC = StringSwitch<unsigned>(Tok.getString())
    .Case("eq", ARMCC::EQ)
    .Case("ne", ARMCC::NE)
    .Case("hs", ARMCC::HS)
    .Case("cs", ARMCC::HS)
    .Case("lo", ARMCC::LO)
    .Case("cc", ARMCC::LO)
    .Case("mi", ARMCC::MI)
    .Case("pl", ARMCC::PL)
    .Case("vs", ARMCC::VS)
    .Case("vc", ARMCC::VC)
    .Case("hi", ARMCC::HI)
    .Case("ls", ARMCC::LS)
    .Case("ge", ARMCC::GE)
    .Case("lt", ARMCC::LT)
    .Case("gt", ARMCC::GT)
    .Case("le", ARMCC::LE)
    .Case("al", ARMCC::AL)
    .Default(~0U);
  if (CC == ~0U)
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the token.

  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));

  return MatchOperand_Success;
}

/// parseCoprocNumOperand - Try to parse an coprocessor number operand.
The 2261/// token must be an Identifier when called, and if it is a coprocessor 2262/// number, the token is eaten and the operand is added to the operand list. 2263ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2264parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2265 SMLoc S = Parser.getTok().getLoc(); 2266 const AsmToken &Tok = Parser.getTok(); 2267 if (Tok.isNot(AsmToken::Identifier)) 2268 return MatchOperand_NoMatch; 2269 2270 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2271 if (Num == -1) 2272 return MatchOperand_NoMatch; 2273 2274 Parser.Lex(); // Eat identifier token. 2275 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2276 return MatchOperand_Success; 2277} 2278 2279/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2280/// token must be an Identifier when called, and if it is a coprocessor 2281/// number, the token is eaten and the operand is added to the operand list. 2282ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2283parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2284 SMLoc S = Parser.getTok().getLoc(); 2285 const AsmToken &Tok = Parser.getTok(); 2286 if (Tok.isNot(AsmToken::Identifier)) 2287 return MatchOperand_NoMatch; 2288 2289 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2290 if (Reg == -1) 2291 return MatchOperand_NoMatch; 2292 2293 Parser.Lex(); // Eat identifier token. 2294 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2295 return MatchOperand_Success; 2296} 2297 2298/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2299/// coproc_option : '{' imm0_255 '}' 2300ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2301parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2302 SMLoc S = Parser.getTok().getLoc(); 2303 2304 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2305 if (Parser.getTok().isNot(AsmToken::LCurly)) 2306 return MatchOperand_NoMatch; 2307 Parser.Lex(); // Eat the '{' 2308 2309 const MCExpr *Expr; 2310 SMLoc Loc = Parser.getTok().getLoc(); 2311 if (getParser().ParseExpression(Expr)) { 2312 Error(Loc, "illegal expression"); 2313 return MatchOperand_ParseFail; 2314 } 2315 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2316 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2317 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2318 return MatchOperand_ParseFail; 2319 } 2320 int Val = CE->getValue(); 2321 2322 // Check for and consume the closing '}' 2323 if (Parser.getTok().isNot(AsmToken::RCurly)) 2324 return MatchOperand_ParseFail; 2325 SMLoc E = Parser.getTok().getLoc(); 2326 Parser.Lex(); // Eat the '}' 2327 2328 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2329 return MatchOperand_Success; 2330} 2331 2332// For register list parsing, we need to map from raw GPR register numbering 2333// to the enumeration values. The enumeration values aren't sorted by 2334// register number due to our using "sp", "lr" and "pc" as canonical names. 2335static unsigned getNextRegister(unsigned Reg) { 2336 // If this is a GPR, we need to do it manually, otherwise we can rely 2337 // on the sort ordering of the enumeration since the other reg-classes 2338 // are sane. 
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

/// Parse a register list. Handles comma-separated registers and
/// lo-hi ranges, e.g. "{r0, r4-r7, lr}". Returns true on error.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
  // Store the first register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
  return false;
}

// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if(Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg = Reg;
  unsigned Count = 1;
  while (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (Reg != OldReg + 1) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }

    ++Count;
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef OptStr = Tok.getString();

  // Accepts both the ARMv7 names (ish, nsh, osh, ...) and the older
  // ARMv6 aliases (sh, un, ...).
  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
    .Case("sy",    ARM_MB::SY)
    .Case("st",    ARM_MB::ST)
    .Case("sh",    ARM_MB::ISH)
    .Case("ish",   ARM_MB::ISH)
    .Case("shst",  ARM_MB::ISHST)
    .Case("ishst", ARM_MB::ISHST)
    .Case("nsh",   ARM_MB::NSH)
    .Case("un",    ARM_MB::NSH)
    .Case("nshst", ARM_MB::NSHST)
    .Case("unst",  ARM_MB::NSHST)
    .Case("osh",   ARM_MB::OSH)
    .Case("oshst", ARM_MB::OSHST)
    .Default(~0U);

  if (Opt == ~0U)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
  return MatchOperand_Success;
}

/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
2525ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2526parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2527 SMLoc S = Parser.getTok().getLoc(); 2528 const AsmToken &Tok = Parser.getTok(); 2529 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2530 StringRef IFlagsStr = Tok.getString(); 2531 2532 // An iflags string of "none" is interpreted to mean that none of the AIF 2533 // bits are set. Not a terribly useful instruction, but a valid encoding. 2534 unsigned IFlags = 0; 2535 if (IFlagsStr != "none") { 2536 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2537 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2538 .Case("a", ARM_PROC::A) 2539 .Case("i", ARM_PROC::I) 2540 .Case("f", ARM_PROC::F) 2541 .Default(~0U); 2542 2543 // If some specific iflag is already set, it means that some letter is 2544 // present more than once, this is not acceptable. 2545 if (Flag == ~0U || (IFlags & Flag)) 2546 return MatchOperand_NoMatch; 2547 2548 IFlags |= Flag; 2549 } 2550 } 2551 2552 Parser.Lex(); // Eat identifier token. 2553 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2554 return MatchOperand_Success; 2555} 2556 2557/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2558ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2559parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2560 SMLoc S = Parser.getTok().getLoc(); 2561 const AsmToken &Tok = Parser.getTok(); 2562 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2563 StringRef Mask = Tok.getString(); 2564 2565 if (isMClass()) { 2566 // See ARMv6-M 10.1.1 2567 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2568 .Case("apsr", 0) 2569 .Case("iapsr", 1) 2570 .Case("eapsr", 2) 2571 .Case("xpsr", 3) 2572 .Case("ipsr", 5) 2573 .Case("epsr", 6) 2574 .Case("iepsr", 7) 2575 .Case("msp", 8) 2576 .Case("psp", 9) 2577 .Case("primask", 16) 2578 .Case("basepri", 17) 2579 .Case("basepri_max", 18) 2580 .Case("faultmask", 19) 2581 .Case("control", 20) 2582 .Default(~0U); 2583 2584 if (FlagsVal == ~0U) 2585 return MatchOperand_NoMatch; 2586 2587 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2588 // basepri, basepri_max and faultmask only valid for V7m. 2589 return MatchOperand_NoMatch; 2590 2591 Parser.Lex(); // Eat identifier token. 
2592 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2593 return MatchOperand_Success; 2594 } 2595 2596 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2597 size_t Start = 0, Next = Mask.find('_'); 2598 StringRef Flags = ""; 2599 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2600 if (Next != StringRef::npos) 2601 Flags = Mask.slice(Next+1, Mask.size()); 2602 2603 // FlagsVal contains the complete mask: 2604 // 3-0: Mask 2605 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2606 unsigned FlagsVal = 0; 2607 2608 if (SpecReg == "apsr") { 2609 FlagsVal = StringSwitch<unsigned>(Flags) 2610 .Case("nzcvq", 0x8) // same as CPSR_f 2611 .Case("g", 0x4) // same as CPSR_s 2612 .Case("nzcvqg", 0xc) // same as CPSR_fs 2613 .Default(~0U); 2614 2615 if (FlagsVal == ~0U) { 2616 if (!Flags.empty()) 2617 return MatchOperand_NoMatch; 2618 else 2619 FlagsVal = 8; // No flag 2620 } 2621 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2622 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2623 Flags = "fc"; 2624 for (int i = 0, e = Flags.size(); i != e; ++i) { 2625 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2626 .Case("c", 1) 2627 .Case("x", 2) 2628 .Case("s", 4) 2629 .Case("f", 8) 2630 .Default(~0U); 2631 2632 // If some specific flag is already set, it means that some letter is 2633 // present more than once, this is not acceptable. 2634 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2635 return MatchOperand_NoMatch; 2636 FlagsVal |= Flag; 2637 } 2638 } else // No match for special register. 2639 return MatchOperand_NoMatch; 2640 2641 // Special register without flags is NOT equivalent to "fc" flags. 2642 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2643 // two lines would enable gas compatibility at the expense of breaking 2644 // round-tripping. 
2645 // 2646 // if (!FlagsVal) 2647 // FlagsVal = 0x9; 2648 2649 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2650 if (SpecReg == "spsr") 2651 FlagsVal |= 16; 2652 2653 Parser.Lex(); // Eat identifier token. 2654 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2655 return MatchOperand_Success; 2656} 2657 2658ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2659parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2660 int Low, int High) { 2661 const AsmToken &Tok = Parser.getTok(); 2662 if (Tok.isNot(AsmToken::Identifier)) { 2663 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2664 return MatchOperand_ParseFail; 2665 } 2666 StringRef ShiftName = Tok.getString(); 2667 std::string LowerOp = LowercaseString(Op); 2668 std::string UpperOp = UppercaseString(Op); 2669 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2670 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2671 return MatchOperand_ParseFail; 2672 } 2673 Parser.Lex(); // Eat shift type token. 2674 2675 // There must be a '#' and a shift amount. 2676 if (Parser.getTok().isNot(AsmToken::Hash)) { 2677 Error(Parser.getTok().getLoc(), "'#' expected"); 2678 return MatchOperand_ParseFail; 2679 } 2680 Parser.Lex(); // Eat hash token. 
2681 2682 const MCExpr *ShiftAmount; 2683 SMLoc Loc = Parser.getTok().getLoc(); 2684 if (getParser().ParseExpression(ShiftAmount)) { 2685 Error(Loc, "illegal expression"); 2686 return MatchOperand_ParseFail; 2687 } 2688 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2689 if (!CE) { 2690 Error(Loc, "constant expression expected"); 2691 return MatchOperand_ParseFail; 2692 } 2693 int Val = CE->getValue(); 2694 if (Val < Low || Val > High) { 2695 Error(Loc, "immediate value out of range"); 2696 return MatchOperand_ParseFail; 2697 } 2698 2699 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2700 2701 return MatchOperand_Success; 2702} 2703 2704ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2705parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2706 const AsmToken &Tok = Parser.getTok(); 2707 SMLoc S = Tok.getLoc(); 2708 if (Tok.isNot(AsmToken::Identifier)) { 2709 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2710 return MatchOperand_ParseFail; 2711 } 2712 int Val = StringSwitch<int>(Tok.getString()) 2713 .Case("be", 1) 2714 .Case("le", 0) 2715 .Default(-1); 2716 Parser.Lex(); // Eat the token. 2717 2718 if (Val == -1) { 2719 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2720 return MatchOperand_ParseFail; 2721 } 2722 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2723 getContext()), 2724 S, Parser.getTok().getLoc())); 2725 return MatchOperand_Success; 2726} 2727 2728/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2729/// instructions. Legal values are: 2730/// lsl #n 'n' in [0,31] 2731/// asr #n 'n' in [1,32] 2732/// n == 32 encoded as n == 0. 
2733ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2734parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2735 const AsmToken &Tok = Parser.getTok(); 2736 SMLoc S = Tok.getLoc(); 2737 if (Tok.isNot(AsmToken::Identifier)) { 2738 Error(S, "shift operator 'asr' or 'lsl' expected"); 2739 return MatchOperand_ParseFail; 2740 } 2741 StringRef ShiftName = Tok.getString(); 2742 bool isASR; 2743 if (ShiftName == "lsl" || ShiftName == "LSL") 2744 isASR = false; 2745 else if (ShiftName == "asr" || ShiftName == "ASR") 2746 isASR = true; 2747 else { 2748 Error(S, "shift operator 'asr' or 'lsl' expected"); 2749 return MatchOperand_ParseFail; 2750 } 2751 Parser.Lex(); // Eat the operator. 2752 2753 // A '#' and a shift amount. 2754 if (Parser.getTok().isNot(AsmToken::Hash)) { 2755 Error(Parser.getTok().getLoc(), "'#' expected"); 2756 return MatchOperand_ParseFail; 2757 } 2758 Parser.Lex(); // Eat hash token. 2759 2760 const MCExpr *ShiftAmount; 2761 SMLoc E = Parser.getTok().getLoc(); 2762 if (getParser().ParseExpression(ShiftAmount)) { 2763 Error(E, "malformed shift expression"); 2764 return MatchOperand_ParseFail; 2765 } 2766 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2767 if (!CE) { 2768 Error(E, "shift amount must be an immediate"); 2769 return MatchOperand_ParseFail; 2770 } 2771 2772 int64_t Val = CE->getValue(); 2773 if (isASR) { 2774 // Shift amount must be in [1,32] 2775 if (Val < 1 || Val > 32) { 2776 Error(E, "'asr' shift amount must be in range [1,32]"); 2777 return MatchOperand_ParseFail; 2778 } 2779 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2780 if (isThumb() && Val == 32) { 2781 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2782 return MatchOperand_ParseFail; 2783 } 2784 if (Val == 32) Val = 0; 2785 } else { 2786 // Shift amount must be in [1,32] 2787 if (Val < 0 || Val > 31) { 2788 Error(E, "'lsr' shift amount must be in range [0,31]"); 2789 return MatchOperand_ParseFail; 2790 } 2791 } 2792 2793 E = Parser.getTok().getLoc(); 2794 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2795 2796 return MatchOperand_Success; 2797} 2798 2799/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2800/// of instructions. Legal values are: 2801/// ror #n 'n' in {0, 8, 16, 24} 2802ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2803parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2804 const AsmToken &Tok = Parser.getTok(); 2805 SMLoc S = Tok.getLoc(); 2806 if (Tok.isNot(AsmToken::Identifier)) 2807 return MatchOperand_NoMatch; 2808 StringRef ShiftName = Tok.getString(); 2809 if (ShiftName != "ror" && ShiftName != "ROR") 2810 return MatchOperand_NoMatch; 2811 Parser.Lex(); // Eat the operator. 2812 2813 // A '#' and a rotate amount. 2814 if (Parser.getTok().isNot(AsmToken::Hash)) { 2815 Error(Parser.getTok().getLoc(), "'#' expected"); 2816 return MatchOperand_ParseFail; 2817 } 2818 Parser.Lex(); // Eat hash token. 2819 2820 const MCExpr *ShiftAmount; 2821 SMLoc E = Parser.getTok().getLoc(); 2822 if (getParser().ParseExpression(ShiftAmount)) { 2823 Error(E, "malformed rotate expression"); 2824 return MatchOperand_ParseFail; 2825 } 2826 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2827 if (!CE) { 2828 Error(E, "rotate amount must be an immediate"); 2829 return MatchOperand_ParseFail; 2830 } 2831 2832 int64_t Val = CE->getValue(); 2833 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2834 // normally, zero is represented in asm by omitting the rotate operand 2835 // entirely. 
2836 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2837 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2838 return MatchOperand_ParseFail; 2839 } 2840 2841 E = Parser.getTok().getLoc(); 2842 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2843 2844 return MatchOperand_Success; 2845} 2846 2847ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2848parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2849 SMLoc S = Parser.getTok().getLoc(); 2850 // The bitfield descriptor is really two operands, the LSB and the width. 2851 if (Parser.getTok().isNot(AsmToken::Hash)) { 2852 Error(Parser.getTok().getLoc(), "'#' expected"); 2853 return MatchOperand_ParseFail; 2854 } 2855 Parser.Lex(); // Eat hash token. 2856 2857 const MCExpr *LSBExpr; 2858 SMLoc E = Parser.getTok().getLoc(); 2859 if (getParser().ParseExpression(LSBExpr)) { 2860 Error(E, "malformed immediate expression"); 2861 return MatchOperand_ParseFail; 2862 } 2863 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2864 if (!CE) { 2865 Error(E, "'lsb' operand must be an immediate"); 2866 return MatchOperand_ParseFail; 2867 } 2868 2869 int64_t LSB = CE->getValue(); 2870 // The LSB must be in the range [0,31] 2871 if (LSB < 0 || LSB > 31) { 2872 Error(E, "'lsb' operand must be in the range [0,31]"); 2873 return MatchOperand_ParseFail; 2874 } 2875 E = Parser.getTok().getLoc(); 2876 2877 // Expect another immediate operand. 2878 if (Parser.getTok().isNot(AsmToken::Comma)) { 2879 Error(Parser.getTok().getLoc(), "too few operands"); 2880 return MatchOperand_ParseFail; 2881 } 2882 Parser.Lex(); // Eat hash token. 2883 if (Parser.getTok().isNot(AsmToken::Hash)) { 2884 Error(Parser.getTok().getLoc(), "'#' expected"); 2885 return MatchOperand_ParseFail; 2886 } 2887 Parser.Lex(); // Eat hash token. 
2888 2889 const MCExpr *WidthExpr; 2890 if (getParser().ParseExpression(WidthExpr)) { 2891 Error(E, "malformed immediate expression"); 2892 return MatchOperand_ParseFail; 2893 } 2894 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2895 if (!CE) { 2896 Error(E, "'width' operand must be an immediate"); 2897 return MatchOperand_ParseFail; 2898 } 2899 2900 int64_t Width = CE->getValue(); 2901 // The LSB must be in the range [1,32-lsb] 2902 if (Width < 1 || Width > 32 - LSB) { 2903 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2904 return MatchOperand_ParseFail; 2905 } 2906 E = Parser.getTok().getLoc(); 2907 2908 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2909 2910 return MatchOperand_Success; 2911} 2912 2913ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2914parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2915 // Check for a post-index addressing register operand. Specifically: 2916 // postidx_reg := '+' register {, shift} 2917 // | '-' register {, shift} 2918 // | register {, shift} 2919 2920 // This method must return MatchOperand_NoMatch without consuming any tokens 2921 // in the case where there is no match, as other alternatives take other 2922 // parse methods. 2923 AsmToken Tok = Parser.getTok(); 2924 SMLoc S = Tok.getLoc(); 2925 bool haveEaten = false; 2926 bool isAdd = true; 2927 int Reg = -1; 2928 if (Tok.is(AsmToken::Plus)) { 2929 Parser.Lex(); // Eat the '+' token. 2930 haveEaten = true; 2931 } else if (Tok.is(AsmToken::Minus)) { 2932 Parser.Lex(); // Eat the '-' token. 
2933 isAdd = false; 2934 haveEaten = true; 2935 } 2936 if (Parser.getTok().is(AsmToken::Identifier)) 2937 Reg = tryParseRegister(); 2938 if (Reg == -1) { 2939 if (!haveEaten) 2940 return MatchOperand_NoMatch; 2941 Error(Parser.getTok().getLoc(), "register expected"); 2942 return MatchOperand_ParseFail; 2943 } 2944 SMLoc E = Parser.getTok().getLoc(); 2945 2946 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2947 unsigned ShiftImm = 0; 2948 if (Parser.getTok().is(AsmToken::Comma)) { 2949 Parser.Lex(); // Eat the ','. 2950 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2951 return MatchOperand_ParseFail; 2952 } 2953 2954 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2955 ShiftImm, S, E)); 2956 2957 return MatchOperand_Success; 2958} 2959 2960ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2961parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2962 // Check for a post-index addressing register operand. Specifically: 2963 // am3offset := '+' register 2964 // | '-' register 2965 // | register 2966 // | # imm 2967 // | # + imm 2968 // | # - imm 2969 2970 // This method must return MatchOperand_NoMatch without consuming any tokens 2971 // in the case where there is no match, as other alternatives take other 2972 // parse methods. 2973 AsmToken Tok = Parser.getTok(); 2974 SMLoc S = Tok.getLoc(); 2975 2976 // Do immediates first, as we always parse those if we have a '#'. 2977 if (Parser.getTok().is(AsmToken::Hash)) { 2978 Parser.Lex(); // Eat the '#'. 2979 // Explicitly look for a '-', as we need to encode negative zero 2980 // differently. 
2981 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2982 const MCExpr *Offset; 2983 if (getParser().ParseExpression(Offset)) 2984 return MatchOperand_ParseFail; 2985 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2986 if (!CE) { 2987 Error(S, "constant expression expected"); 2988 return MatchOperand_ParseFail; 2989 } 2990 SMLoc E = Tok.getLoc(); 2991 // Negative zero is encoded as the flag value INT32_MIN. 2992 int32_t Val = CE->getValue(); 2993 if (isNegative && Val == 0) 2994 Val = INT32_MIN; 2995 2996 Operands.push_back( 2997 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2998 2999 return MatchOperand_Success; 3000 } 3001 3002 3003 bool haveEaten = false; 3004 bool isAdd = true; 3005 int Reg = -1; 3006 if (Tok.is(AsmToken::Plus)) { 3007 Parser.Lex(); // Eat the '+' token. 3008 haveEaten = true; 3009 } else if (Tok.is(AsmToken::Minus)) { 3010 Parser.Lex(); // Eat the '-' token. 3011 isAdd = false; 3012 haveEaten = true; 3013 } 3014 if (Parser.getTok().is(AsmToken::Identifier)) 3015 Reg = tryParseRegister(); 3016 if (Reg == -1) { 3017 if (!haveEaten) 3018 return MatchOperand_NoMatch; 3019 Error(Parser.getTok().getLoc(), "register expected"); 3020 return MatchOperand_ParseFail; 3021 } 3022 SMLoc E = Parser.getTok().getLoc(); 3023 3024 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3025 0, S, E)); 3026 3027 return MatchOperand_Success; 3028} 3029 3030/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3031/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3032/// when they refer multiple MIOperands inside a single one. 3033bool ARMAsmParser:: 3034cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3035 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3036 // Rt, Rt2 3037 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3038 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3039 // Create a writeback register dummy placeholder. 
3040 Inst.addOperand(MCOperand::CreateReg(0)); 3041 // addr 3042 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3043 // pred 3044 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3045 return true; 3046} 3047 3048/// cvtT2StrdPre - Convert parsed operands to MCInst. 3049/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3050/// when they refer multiple MIOperands inside a single one. 3051bool ARMAsmParser:: 3052cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3053 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3054 // Create a writeback register dummy placeholder. 3055 Inst.addOperand(MCOperand::CreateReg(0)); 3056 // Rt, Rt2 3057 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3058 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3059 // addr 3060 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3061 // pred 3062 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3063 return true; 3064} 3065 3066/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3067/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3068/// when they refer multiple MIOperands inside a single one. 3069bool ARMAsmParser:: 3070cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3071 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3072 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3073 3074 // Create a writeback register dummy placeholder. 3075 Inst.addOperand(MCOperand::CreateImm(0)); 3076 3077 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3078 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3079 return true; 3080} 3081 3082/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3083/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3084/// when they refer multiple MIOperands inside a single one. 
3085bool ARMAsmParser:: 3086cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3087 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3088 // Create a writeback register dummy placeholder. 3089 Inst.addOperand(MCOperand::CreateImm(0)); 3090 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3091 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3092 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3093 return true; 3094} 3095 3096/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3097/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3098/// when they refer multiple MIOperands inside a single one. 3099bool ARMAsmParser:: 3100cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3101 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3102 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3103 3104 // Create a writeback register dummy placeholder. 3105 Inst.addOperand(MCOperand::CreateImm(0)); 3106 3107 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3108 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3109 return true; 3110} 3111 3112/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3113/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3114/// when they refer multiple MIOperands inside a single one. 3115bool ARMAsmParser:: 3116cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3117 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3118 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3119 3120 // Create a writeback register dummy placeholder. 3121 Inst.addOperand(MCOperand::CreateImm(0)); 3122 3123 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3124 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3125 return true; 3126} 3127 3128 3129/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3130/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3131/// when they refer multiple MIOperands inside a single one. 3132bool ARMAsmParser:: 3133cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3134 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3135 // Create a writeback register dummy placeholder. 3136 Inst.addOperand(MCOperand::CreateImm(0)); 3137 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3138 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3139 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3140 return true; 3141} 3142 3143/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3144/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3145/// when they refer multiple MIOperands inside a single one. 3146bool ARMAsmParser:: 3147cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3148 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3149 // Create a writeback register dummy placeholder. 3150 Inst.addOperand(MCOperand::CreateImm(0)); 3151 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3152 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3153 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3154 return true; 3155} 3156 3157/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3158/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3159/// when they refer multiple MIOperands inside a single one. 3160bool ARMAsmParser:: 3161cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3162 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3163 // Create a writeback register dummy placeholder. 
3164 Inst.addOperand(MCOperand::CreateImm(0)); 3165 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3166 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3167 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3168 return true; 3169} 3170 3171/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3172/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3173/// when they refer multiple MIOperands inside a single one. 3174bool ARMAsmParser:: 3175cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3176 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3177 // Rt 3178 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3179 // Create a writeback register dummy placeholder. 3180 Inst.addOperand(MCOperand::CreateImm(0)); 3181 // addr 3182 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3183 // offset 3184 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3185 // pred 3186 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3187 return true; 3188} 3189 3190/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3191/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3192/// when they refer multiple MIOperands inside a single one. 3193bool ARMAsmParser:: 3194cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3195 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3196 // Rt 3197 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3198 // Create a writeback register dummy placeholder. 3199 Inst.addOperand(MCOperand::CreateImm(0)); 3200 // addr 3201 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3202 // offset 3203 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3204 // pred 3205 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3206 return true; 3207} 3208 3209/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3210/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3211/// when they refer multiple MIOperands inside a single one. 3212bool ARMAsmParser:: 3213cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3214 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3215 // Create a writeback register dummy placeholder. 3216 Inst.addOperand(MCOperand::CreateImm(0)); 3217 // Rt 3218 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3219 // addr 3220 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3221 // offset 3222 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3223 // pred 3224 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3225 return true; 3226} 3227 3228/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3229/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3230/// when they refer multiple MIOperands inside a single one. 3231bool ARMAsmParser:: 3232cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3233 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3234 // Create a writeback register dummy placeholder. 3235 Inst.addOperand(MCOperand::CreateImm(0)); 3236 // Rt 3237 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3238 // addr 3239 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3240 // offset 3241 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3242 // pred 3243 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3244 return true; 3245} 3246 3247/// cvtLdrdPre - Convert parsed operands to MCInst. 3248/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3249/// when they refer multiple MIOperands inside a single one. 
3250bool ARMAsmParser:: 3251cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3252 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3253 // Rt, Rt2 3254 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3255 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3256 // Create a writeback register dummy placeholder. 3257 Inst.addOperand(MCOperand::CreateImm(0)); 3258 // addr 3259 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3260 // pred 3261 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3262 return true; 3263} 3264 3265/// cvtStrdPre - Convert parsed operands to MCInst. 3266/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3267/// when they refer multiple MIOperands inside a single one. 3268bool ARMAsmParser:: 3269cvtStrdPre(MCInst &Inst, unsigned Opcode, 3270 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3271 // Create a writeback register dummy placeholder. 3272 Inst.addOperand(MCOperand::CreateImm(0)); 3273 // Rt, Rt2 3274 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3275 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3276 // addr 3277 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3278 // pred 3279 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3280 return true; 3281} 3282 3283/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3284/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3285/// when they refer multiple MIOperands inside a single one. 3286bool ARMAsmParser:: 3287cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3288 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3289 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3290 // Create a writeback register dummy placeholder. 
3291 Inst.addOperand(MCOperand::CreateImm(0)); 3292 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3293 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3294 return true; 3295} 3296 3297/// cvtThumbMultiple- Convert parsed operands to MCInst. 3298/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3299/// when they refer multiple MIOperands inside a single one. 3300bool ARMAsmParser:: 3301cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3302 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3303 // The second source operand must be the same register as the destination 3304 // operand. 3305 if (Operands.size() == 6 && 3306 (((ARMOperand*)Operands[3])->getReg() != 3307 ((ARMOperand*)Operands[5])->getReg()) && 3308 (((ARMOperand*)Operands[3])->getReg() != 3309 ((ARMOperand*)Operands[4])->getReg())) { 3310 Error(Operands[3]->getStartLoc(), 3311 "destination register must match source register"); 3312 return false; 3313 } 3314 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3315 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3316 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3317 // If we have a three-operand form, use that, else the second source operand 3318 // is just the destination operand again. 3319 if (Operands.size() == 6) 3320 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3321 else 3322 Inst.addOperand(Inst.getOperand(0)); 3323 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3324 3325 return true; 3326} 3327 3328/// Parse an ARM memory expression, return false if successful else return true 3329/// or an error. The first token must be a '[' when called. 3330bool ARMAsmParser:: 3331parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3332 SMLoc S, E; 3333 assert(Parser.getTok().is(AsmToken::LBrac) && 3334 "Token is not a Left Bracket"); 3335 S = Parser.getTok().getLoc(); 3336 Parser.Lex(); // Eat left bracket token. 
3337 3338 const AsmToken &BaseRegTok = Parser.getTok(); 3339 int BaseRegNum = tryParseRegister(); 3340 if (BaseRegNum == -1) 3341 return Error(BaseRegTok.getLoc(), "register expected"); 3342 3343 // The next token must either be a comma or a closing bracket. 3344 const AsmToken &Tok = Parser.getTok(); 3345 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3346 return Error(Tok.getLoc(), "malformed memory operand"); 3347 3348 if (Tok.is(AsmToken::RBrac)) { 3349 E = Tok.getLoc(); 3350 Parser.Lex(); // Eat right bracket token. 3351 3352 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3353 0, 0, false, S, E)); 3354 3355 // If there's a pre-indexing writeback marker, '!', just add it as a token 3356 // operand. It's rather odd, but syntactically valid. 3357 if (Parser.getTok().is(AsmToken::Exclaim)) { 3358 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3359 Parser.Lex(); // Eat the '!'. 3360 } 3361 3362 return false; 3363 } 3364 3365 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3366 Parser.Lex(); // Eat the comma. 3367 3368 // If we have a ':', it's an alignment specifier. 3369 if (Parser.getTok().is(AsmToken::Colon)) { 3370 Parser.Lex(); // Eat the ':'. 3371 E = Parser.getTok().getLoc(); 3372 3373 const MCExpr *Expr; 3374 if (getParser().ParseExpression(Expr)) 3375 return true; 3376 3377 // The expression has to be a constant. Memory references with relocations 3378 // don't come through here, as they use the <label> forms of the relevant 3379 // instructions. 
3380 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3381 if (!CE) 3382 return Error (E, "constant expression expected"); 3383 3384 unsigned Align = 0; 3385 switch (CE->getValue()) { 3386 default: 3387 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3388 case 64: Align = 8; break; 3389 case 128: Align = 16; break; 3390 case 256: Align = 32; break; 3391 } 3392 3393 // Now we should have the closing ']' 3394 E = Parser.getTok().getLoc(); 3395 if (Parser.getTok().isNot(AsmToken::RBrac)) 3396 return Error(E, "']' expected"); 3397 Parser.Lex(); // Eat right bracket token. 3398 3399 // Don't worry about range checking the value here. That's handled by 3400 // the is*() predicates. 3401 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3402 ARM_AM::no_shift, 0, Align, 3403 false, S, E)); 3404 3405 // If there's a pre-indexing writeback marker, '!', just add it as a token 3406 // operand. 3407 if (Parser.getTok().is(AsmToken::Exclaim)) { 3408 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3409 Parser.Lex(); // Eat the '!'. 3410 } 3411 3412 return false; 3413 } 3414 3415 // If we have a '#', it's an immediate offset, else assume it's a register 3416 // offset. 3417 if (Parser.getTok().is(AsmToken::Hash)) { 3418 Parser.Lex(); // Eat the '#'. 3419 E = Parser.getTok().getLoc(); 3420 3421 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3422 const MCExpr *Offset; 3423 if (getParser().ParseExpression(Offset)) 3424 return true; 3425 3426 // The expression has to be a constant. Memory references with relocations 3427 // don't come through here, as they use the <label> forms of the relevant 3428 // instructions. 3429 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3430 if (!CE) 3431 return Error (E, "constant expression expected"); 3432 3433 // If the constant was #-0, represent it as INT32_MIN. 
3434 int32_t Val = CE->getValue(); 3435 if (isNegative && Val == 0) 3436 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3437 3438 // Now we should have the closing ']' 3439 E = Parser.getTok().getLoc(); 3440 if (Parser.getTok().isNot(AsmToken::RBrac)) 3441 return Error(E, "']' expected"); 3442 Parser.Lex(); // Eat right bracket token. 3443 3444 // Don't worry about range checking the value here. That's handled by 3445 // the is*() predicates. 3446 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3447 ARM_AM::no_shift, 0, 0, 3448 false, S, E)); 3449 3450 // If there's a pre-indexing writeback marker, '!', just add it as a token 3451 // operand. 3452 if (Parser.getTok().is(AsmToken::Exclaim)) { 3453 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3454 Parser.Lex(); // Eat the '!'. 3455 } 3456 3457 return false; 3458 } 3459 3460 // The register offset is optionally preceded by a '+' or '-' 3461 bool isNegative = false; 3462 if (Parser.getTok().is(AsmToken::Minus)) { 3463 isNegative = true; 3464 Parser.Lex(); // Eat the '-'. 3465 } else if (Parser.getTok().is(AsmToken::Plus)) { 3466 // Nothing to do. 3467 Parser.Lex(); // Eat the '+'. 3468 } 3469 3470 E = Parser.getTok().getLoc(); 3471 int OffsetRegNum = tryParseRegister(); 3472 if (OffsetRegNum == -1) 3473 return Error(E, "register expected"); 3474 3475 // If there's a shift operator, handle it. 3476 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3477 unsigned ShiftImm = 0; 3478 if (Parser.getTok().is(AsmToken::Comma)) { 3479 Parser.Lex(); // Eat the ','. 3480 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3481 return true; 3482 } 3483 3484 // Now we should have the closing ']' 3485 E = Parser.getTok().getLoc(); 3486 if (Parser.getTok().isNot(AsmToken::RBrac)) 3487 return Error(E, "']' expected"); 3488 Parser.Lex(); // Eat right bracket token. 
3489 3490 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3491 ShiftType, ShiftImm, 0, isNegative, 3492 S, E)); 3493 3494 // If there's a pre-indexing writeback marker, '!', just add it as a token 3495 // operand. 3496 if (Parser.getTok().is(AsmToken::Exclaim)) { 3497 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3498 Parser.Lex(); // Eat the '!'. 3499 } 3500 3501 return false; 3502} 3503 3504/// parseMemRegOffsetShift - one of these two: 3505/// ( lsl | lsr | asr | ror ) , # shift_amount 3506/// rrx 3507/// return true if it parses a shift otherwise it returns false. 3508bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3509 unsigned &Amount) { 3510 SMLoc Loc = Parser.getTok().getLoc(); 3511 const AsmToken &Tok = Parser.getTok(); 3512 if (Tok.isNot(AsmToken::Identifier)) 3513 return true; 3514 StringRef ShiftName = Tok.getString(); 3515 if (ShiftName == "lsl" || ShiftName == "LSL") 3516 St = ARM_AM::lsl; 3517 else if (ShiftName == "lsr" || ShiftName == "LSR") 3518 St = ARM_AM::lsr; 3519 else if (ShiftName == "asr" || ShiftName == "ASR") 3520 St = ARM_AM::asr; 3521 else if (ShiftName == "ror" || ShiftName == "ROR") 3522 St = ARM_AM::ror; 3523 else if (ShiftName == "rrx" || ShiftName == "RRX") 3524 St = ARM_AM::rrx; 3525 else 3526 return Error(Loc, "illegal shift operator"); 3527 Parser.Lex(); // Eat shift type token. 3528 3529 // rrx stands alone. 3530 Amount = 0; 3531 if (St != ARM_AM::rrx) { 3532 Loc = Parser.getTok().getLoc(); 3533 // A '#' and a shift amount. 3534 const AsmToken &HashTok = Parser.getTok(); 3535 if (HashTok.isNot(AsmToken::Hash)) 3536 return Error(HashTok.getLoc(), "'#' expected"); 3537 Parser.Lex(); // Eat hash token. 3538 3539 const MCExpr *Expr; 3540 if (getParser().ParseExpression(Expr)) 3541 return true; 3542 // Range check the immediate. 
  // (Tail of the shift-amount validation.) The amount, when present, must be
  // a constant within the architectural range for the given shift type:
  // lsl, ror: 0 <= imm <= 31
  // lsr, asr: 0 <= imm <= 32
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE)
    return Error(Loc, "shift amount must be an immediate");
  int64_t Imm = CE->getValue();
  if (Imm < 0 ||
      ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
      ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
    return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
/// Accepts either a real literal (encoded via getFP64Imm) or an already
/// encoded 8-bit integer value; returns NoMatch when the operand cannot be
/// an FP immediate so other parsers get a chance.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  // Operands[2] is the datatype suffix token pushed by ParseInstruction.
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
      TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns the 8-bit VFP immediate encoding, or -1 if the
    // value is not representable.
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    // An integer here is taken as the pre-encoded 8-bit immediate form.
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic. Returns true on error (bool convention,
/// unlike the custom parsers above which return OperandMatchResultTy).
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If this is VMRS, check for the apsr_nzcv operand.
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall through for the Identifier case that is not a register or a
    // special name.
  }
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Hash: {
    // #42 -> immediate.
3677 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3678 S = Parser.getTok().getLoc(); 3679 Parser.Lex(); 3680 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3681 const MCExpr *ImmVal; 3682 if (getParser().ParseExpression(ImmVal)) 3683 return true; 3684 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3685 if (!CE) { 3686 Error(S, "constant expression expected"); 3687 return MatchOperand_ParseFail; 3688 } 3689 int32_t Val = CE->getValue(); 3690 if (isNegative && Val == 0) 3691 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3692 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3693 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3694 return false; 3695 } 3696 case AsmToken::Colon: { 3697 // ":lower16:" and ":upper16:" expression prefixes 3698 // FIXME: Check it's an expression prefix, 3699 // e.g. (FOO - :lower16:BAR) isn't legal. 3700 ARMMCExpr::VariantKind RefKind; 3701 if (parsePrefix(RefKind)) 3702 return true; 3703 3704 const MCExpr *SubExprVal; 3705 if (getParser().ParseExpression(SubExprVal)) 3706 return true; 3707 3708 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3709 getContext()); 3710 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3711 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3712 return false; 3713 } 3714 } 3715} 3716 3717// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3718// :lower16: and :upper16:. 
/// parsePrefix - Consume a ":lower16:" or ":upper16:" prefix (the leading ':'
/// is already the current token) and report the relocation variant in
/// RefKind. Returns true on error (diagnostic already emitted).
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
//
// Returns the canonical (stripped) mnemonic; the stripped pieces come back
// through the reference out-parameters.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // The last two characters are matched against the condition-code table;
  // StringRef::substr clamps the start index, so short mnemonics are safe.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  // e.g. "ittee" -> mnemonic "it", ITMask "tee".
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
/// getMnemonicAcceptInfo - For the canonical (already split) mnemonic, report
/// whether it can ever take a CCOut (flag-setting) operand and whether it is
/// ever predicable. Purely table-driven on the mnemonic text plus the current
/// Thumb/ARM subtarget mode.
void ARMAsmParser::
getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                      bool &CanAcceptPredicationCode) {
  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" ||
      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
      Mnemonic == "orr" || Mnemonic == "mvn" ||
      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
                      Mnemonic == "mla" || Mnemonic == "smlal" ||
                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
    CanAcceptCarrySet = true;
  } else
    CanAcceptCarrySet = false;

  // NOTE(review): "setend" is tested twice in this condition (harmless
  // redundancy).
  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
      (Mnemonic == "clrex" && !isThumb()) ||
      (Mnemonic == "nop" && isThumbOne()) ||
      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
       !isThumb()) ||
      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
    CanAcceptPredicationCode = false;
  } else
    CanAcceptPredicationCode = true;

  if (isThumb()) {
    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
      CanAcceptPredicationCode = false;
  }
}

/// shouldOmitCCOutOperand - Decide, from the parsed operand list, whether the
/// defaulted CCOut operand (Operands[1]) should be dropped so the matcher can
/// select the variant of the instruction without a cc_out. Returns true when
/// the operand should be removed.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                      SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  // ADD/SUB with SP as the destination never has a cc_out operand.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

/// Parse an arm instruction mnemonic followed by its operands.
/// Splits the mnemonic into canonical name + predication/carry/imod/IT-mask
/// pieces, pushes the implied CCOut/CondCode operands, parses the explicit
/// operands, then post-processes the operand list for the matcher.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the 4-bit mask msb-first; the leading 1 of the initial value is
    // shifted down once per mask letter and marks the end of the block.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                       ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // For now, we're only parsing Thumb1 (for the most part), so
    // just ignore ".n" qualifiers. We'll use them to restrict
    // matching when we do Thumb2.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
// NOTE(review): Inst is taken by value here, copying the MCInst on every
// call; a `const MCInst &` would avoid the copy.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for an opcode directly in the tblgen'erated table.
static MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// FIXME: We would really like to be able to tablegen'erate this.
//
// validateInstruction - Enforce context-sensitive constraints (IT-block
// rules plus per-opcode operand restrictions) that the table-driven matcher
// cannot express. Returns true (with a diagnostic) on a violation.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable.  It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      // Extract this slot's then/else bit from the IT mask.
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

// processInstruction - Rewrite the matched MCInst into the preferred or
// required encoding (e.g. single-register pop -> post-indexed LDR).
void ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
4398 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4399 Inst.getNumOperands() == 5) { 4400 MCInst TmpInst; 4401 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4402 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4403 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4404 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4405 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4406 TmpInst.addOperand(MCOperand::CreateImm(4)); 4407 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4408 TmpInst.addOperand(Inst.getOperand(3)); 4409 Inst = TmpInst; 4410 } 4411 break; 4412 case ARM::STMDB_UPD: 4413 // If this is a store of a single register via a 'push', then we should use 4414 // a pre-indexed STR instruction instead, per the ARM ARM. 4415 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4416 Inst.getNumOperands() == 5) { 4417 MCInst TmpInst; 4418 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4419 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4420 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4421 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4422 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4423 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4424 TmpInst.addOperand(Inst.getOperand(3)); 4425 Inst = TmpInst; 4426 } 4427 break; 4428 case ARM::tADDi8: 4429 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4430 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4431 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4432 // to encoding T1 if <Rd> is omitted." 4433 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4434 Inst.setOpcode(ARM::tADDi3); 4435 break; 4436 case ARM::tSUBi8: 4437 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4438 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4439 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4440 // to encoding T1 if <Rd> is omitted." 
4441 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4442 Inst.setOpcode(ARM::tSUBi3); 4443 break; 4444 case ARM::tB: 4445 // A Thumb conditional branch outside of an IT block is a tBcc. 4446 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4447 Inst.setOpcode(ARM::tBcc); 4448 break; 4449 case ARM::t2B: 4450 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4451 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4452 Inst.setOpcode(ARM::t2Bcc); 4453 break; 4454 case ARM::t2Bcc: 4455 // If the conditional is AL or we're in an IT block, we really want t2B. 4456 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) 4457 Inst.setOpcode(ARM::t2B); 4458 break; 4459 case ARM::tBcc: 4460 // If the conditional is AL, we really want tB. 4461 if (Inst.getOperand(1).getImm() == ARMCC::AL) 4462 Inst.setOpcode(ARM::tB); 4463 break; 4464 case ARM::tLDMIA: { 4465 // If the register list contains any high registers, or if the writeback 4466 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4467 // instead if we're in Thumb2. Otherwise, this should have generated 4468 // an error in validateInstruction(). 4469 unsigned Rn = Inst.getOperand(0).getReg(); 4470 bool hasWritebackToken = 4471 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4472 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4473 bool listContainsBase; 4474 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4475 (!listContainsBase && !hasWritebackToken) || 4476 (listContainsBase && hasWritebackToken)) { 4477 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4478 assert (isThumbTwo()); 4479 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4480 // If we're switching to the updating version, we need to insert 4481 // the writeback tied operand. 
4482 if (hasWritebackToken) 4483 Inst.insert(Inst.begin(), 4484 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4485 } 4486 break; 4487 } 4488 case ARM::tSTMIA_UPD: { 4489 // If the register list contains any high registers, we need to use 4490 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4491 // should have generated an error in validateInstruction(). 4492 unsigned Rn = Inst.getOperand(0).getReg(); 4493 bool listContainsBase; 4494 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4495 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4496 assert (isThumbTwo()); 4497 Inst.setOpcode(ARM::t2STMIA_UPD); 4498 } 4499 break; 4500 } 4501 case ARM::t2MOVi: { 4502 // If we can use the 16-bit encoding and the user didn't explicitly 4503 // request the 32-bit variant, transform it here. 4504 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4505 Inst.getOperand(1).getImm() <= 255 && 4506 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4507 Inst.getOperand(4).getReg() == ARM::CPSR) || 4508 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4509 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4510 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4511 // The operands aren't in the same order for tMOVi8... 4512 MCInst TmpInst; 4513 TmpInst.setOpcode(ARM::tMOVi8); 4514 TmpInst.addOperand(Inst.getOperand(0)); 4515 TmpInst.addOperand(Inst.getOperand(4)); 4516 TmpInst.addOperand(Inst.getOperand(1)); 4517 TmpInst.addOperand(Inst.getOperand(2)); 4518 TmpInst.addOperand(Inst.getOperand(3)); 4519 Inst = TmpInst; 4520 } 4521 break; 4522 } 4523 case ARM::t2MOVr: { 4524 // If we can use the 16-bit encoding and the user didn't explicitly 4525 // request the 32-bit variant, transform it here. 
4526 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4527 isARMLowRegister(Inst.getOperand(1).getReg()) && 4528 Inst.getOperand(2).getImm() == ARMCC::AL && 4529 Inst.getOperand(4).getReg() == ARM::CPSR && 4530 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4531 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4532 // The operands aren't the same for tMOV[S]r... (no cc_out) 4533 MCInst TmpInst; 4534 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 4535 TmpInst.addOperand(Inst.getOperand(0)); 4536 TmpInst.addOperand(Inst.getOperand(1)); 4537 TmpInst.addOperand(Inst.getOperand(2)); 4538 TmpInst.addOperand(Inst.getOperand(3)); 4539 Inst = TmpInst; 4540 } 4541 break; 4542 } 4543 case ARM::t2SXTH: 4544 case ARM::t2SXTB: 4545 case ARM::t2UXTH: 4546 case ARM::t2UXTB: { 4547 // If we can use the 16-bit encoding and the user didn't explicitly 4548 // request the 32-bit variant, transform it here. 4549 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4550 isARMLowRegister(Inst.getOperand(1).getReg()) && 4551 Inst.getOperand(2).getImm() == 0 && 4552 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4553 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4554 unsigned NewOpc; 4555 switch (Inst.getOpcode()) { 4556 default: llvm_unreachable("Illegal opcode!"); 4557 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4558 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4559 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4560 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4561 } 4562 // The operands aren't the same for thumb1 (no rotate operand). 
4563 MCInst TmpInst; 4564 TmpInst.setOpcode(NewOpc); 4565 TmpInst.addOperand(Inst.getOperand(0)); 4566 TmpInst.addOperand(Inst.getOperand(1)); 4567 TmpInst.addOperand(Inst.getOperand(3)); 4568 TmpInst.addOperand(Inst.getOperand(4)); 4569 Inst = TmpInst; 4570 } 4571 break; 4572 } 4573 case ARM::t2IT: { 4574 // The mask bits for all but the first condition are represented as 4575 // the low bit of the condition code value implies 't'. We currently 4576 // always have 1 implies 't', so XOR toggle the bits if the low bit 4577 // of the condition code is zero. The encoding also expects the low 4578 // bit of the condition to be encoded as bit 4 of the mask operand, 4579 // so mask that in if needed 4580 MCOperand &MO = Inst.getOperand(1); 4581 unsigned Mask = MO.getImm(); 4582 unsigned OrigMask = Mask; 4583 unsigned TZ = CountTrailingZeros_32(Mask); 4584 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4585 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4586 for (unsigned i = 3; i != TZ; --i) 4587 Mask ^= 1 << i; 4588 } else 4589 Mask |= 0x10; 4590 MO.setImm(Mask); 4591 4592 // Set up the IT block state according to the IT instruction we just 4593 // matched. 4594 assert(!inITBlock() && "nested IT blocks?!"); 4595 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4596 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4597 ITState.CurPosition = 0; 4598 ITState.FirstCond = true; 4599 break; 4600 } 4601 } 4602} 4603 4604unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4605 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4606 // suffix depending on whether they're in an IT block or not. 
4607 unsigned Opc = Inst.getOpcode(); 4608 MCInstrDesc &MCID = getInstDesc(Opc); 4609 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4610 assert(MCID.hasOptionalDef() && 4611 "optionally flag setting instruction missing optional def operand"); 4612 assert(MCID.NumOperands == Inst.getNumOperands() && 4613 "operand count mismatch!"); 4614 // Find the optional-def operand (cc_out). 4615 unsigned OpNo; 4616 for (OpNo = 0; 4617 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4618 ++OpNo) 4619 ; 4620 // If we're parsing Thumb1, reject it completely. 4621 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4622 return Match_MnemonicFail; 4623 // If we're parsing Thumb2, which form is legal depends on whether we're 4624 // in an IT block. 4625 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4626 !inITBlock()) 4627 return Match_RequiresITBlock; 4628 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4629 inITBlock()) 4630 return Match_RequiresNotITBlock; 4631 } 4632 // Some high-register supporting Thumb1 encodings only allow both registers 4633 // to be from r0-r7 when in Thumb2. 4634 else if (Opc == ARM::tADDhirr && isThumbOne() && 4635 isARMLowRegister(Inst.getOperand(1).getReg()) && 4636 isARMLowRegister(Inst.getOperand(2).getReg())) 4637 return Match_RequiresThumb2; 4638 // Others only require ARMv6 or later. 
4639 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4640 isARMLowRegister(Inst.getOperand(0).getReg()) && 4641 isARMLowRegister(Inst.getOperand(1).getReg())) 4642 return Match_RequiresV6; 4643 return Match_Success; 4644} 4645 4646bool ARMAsmParser:: 4647MatchAndEmitInstruction(SMLoc IDLoc, 4648 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4649 MCStreamer &Out) { 4650 MCInst Inst; 4651 unsigned ErrorInfo; 4652 unsigned MatchResult; 4653 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4654 switch (MatchResult) { 4655 default: break; 4656 case Match_Success: 4657 // Context sensitive operand constraints aren't handled by the matcher, 4658 // so check them here. 4659 if (validateInstruction(Inst, Operands)) { 4660 // Still progress the IT block, otherwise one wrong condition causes 4661 // nasty cascading errors. 4662 forwardITPosition(); 4663 return true; 4664 } 4665 4666 // Some instructions need post-processing to, for example, tweak which 4667 // encoding is selected. 4668 processInstruction(Inst, Operands); 4669 4670 // Only move forward at the very end so that everything in validate 4671 // and process gets a consistent answer about whether we're in an IT 4672 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // identified one; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// ParseDirective - Dispatch the ARM-specific assembler directives.
/// Returns true if the directive was not handled here.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // TODO: set thumb mode
  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has the function name after the .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getString();
    Parser.Lex(); // Consume the identifier token.
4780 } 4781 4782 if (getLexer().isNot(AsmToken::EndOfStatement)) 4783 return Error(L, "unexpected token in directive"); 4784 Parser.Lex(); 4785 4786 // FIXME: assuming function name will be the line following .thumb_func 4787 if (!isMachO) { 4788 Name = Parser.getTok().getString(); 4789 } 4790 4791 // Mark symbol as a thumb symbol. 4792 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4793 getParser().getStreamer().EmitThumbFunc(Func); 4794 return false; 4795} 4796 4797/// parseDirectiveSyntax 4798/// ::= .syntax unified | divided 4799bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4800 const AsmToken &Tok = Parser.getTok(); 4801 if (Tok.isNot(AsmToken::Identifier)) 4802 return Error(L, "unexpected token in .syntax directive"); 4803 StringRef Mode = Tok.getString(); 4804 if (Mode == "unified" || Mode == "UNIFIED") 4805 Parser.Lex(); 4806 else if (Mode == "divided" || Mode == "DIVIDED") 4807 return Error(L, "'.syntax divided' arm asssembly not supported"); 4808 else 4809 return Error(L, "unrecognized syntax mode in .syntax directive"); 4810 4811 if (getLexer().isNot(AsmToken::EndOfStatement)) 4812 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4813 Parser.Lex(); 4814 4815 // TODO tell the MC streamer the mode 4816 // getParser().getStreamer().Emit???(); 4817 return false; 4818} 4819 4820/// parseDirectiveCode 4821/// ::= .code 16 | 32 4822bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4823 const AsmToken &Tok = Parser.getTok(); 4824 if (Tok.isNot(AsmToken::Integer)) 4825 return Error(L, "unexpected token in .code directive"); 4826 int64_t Val = Parser.getTok().getIntVal(); 4827 if (Val == 16) 4828 Parser.Lex(); 4829 else if (Val == 32) 4830 Parser.Lex(); 4831 else 4832 return Error(L, "invalid operand to .code directive"); 4833 4834 if (getLexer().isNot(AsmToken::EndOfStatement)) 4835 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4836 Parser.Lex(); 4837 4838 if (Val == 16) { 4839 if 
(!isThumb()) 4840 SwitchMode(); 4841 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4842 } else { 4843 if (isThumb()) 4844 SwitchMode(); 4845 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4846 } 4847 4848 return false; 4849} 4850 4851extern "C" void LLVMInitializeARMAsmLexer(); 4852 4853/// Force static initialization. 4854extern "C" void LLVMInitializeARMAsmParser() { 4855 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4856 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4857 LLVMInitializeARMAsmLexer(); 4858} 4859 4860#define GET_REGISTER_MATCHER 4861#define GET_MATCHER_IMPLEMENTATION 4862#include "ARMGenAsmMatcher.inc" 4863