ARMAsmParser.cpp revision b6310316dbaf8716003531d7ed245f77f1a76a11
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. Drives
/// operand parsing, mnemonic splitting, IT-block tracking, and final
/// match-and-emit via the tablegen'd matcher in ARMGenAsmMatcher.inc.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // State of the IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True while instructions are still being consumed inside an IT block.
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics are forwarded to the generic MC parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Low-level operand parsing helpers. The try* variants return a sentinel
  // (or false) rather than emitting a diagnostic when the tokens don't match.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Assembler directive handlers (.word, .thumb, .thumb_func, .code, .syntax).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  // Splits a full mnemonic into base mnemonic, predication code, carry
  // setting ('s' suffix), processor IMod (for CPS), and IT condition mask.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries, derived from STI's current feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggles between ARM and Thumb mode and recomputes the available-feature
  // mask used by the auto-generated matcher.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the tablegen'd matcher.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  // PKHBT takes an 'lsl #[0,31]'; PKHTB takes an 'asr #[1,32]'.
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods: massage parsed operand lists into the
  // operand order the selected MCInst expects (mostly for writeback forms).
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match checks and fixups that can't be expressed in tablegen.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match failure kinds, continuing after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below: exactly one alternative is active,
  // selected by Kind. Accessors assert on it; the is* predicates test it.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  // Lives outside the union: SmallVector has a non-trivial destructor.
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;      // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;          // shift for OffsetReg.
      unsigned Alignment;         // 0 = no alignment specified
                                  // n = alignment in bytes (8, 16, or 32)
      unsigned isNegative : 1;    // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy only the union alternative selected by Kind; copying the whole
  // union blindly would read inactive members.
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Kind-checked accessors; each asserts the matching union member is active.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Operand-class predicates used by the auto-generated matcher. The
  // immediate variants additionally require a constant expression within
  // the encodable range for that operand class.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Multiple of 4 in [-1020, 1020].
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Multiple of 4 in [0, 1020].
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Multiple of 4 in [0, 508].
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Thumb shift-right amount: [1, 32].
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isARMSOImm() const {
    // ARM modified-immediate: validity delegated to getSOImmVal.
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isT2SOImm() const {
    // Thumb2 modified-immediate: validity delegated to getT2SOImmVal.
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isSetEndImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (Kind != k_Immediate)
      return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // (OffsetRegNum is already known non-zero here.)
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 0;
  }
  bool isMemUImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes #-0.
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
913 bool isVecListOneD() const { 914 if (Kind != k_VectorList) return false; 915 return VectorList.Count == 1; 916 } 917 918 bool isVecListTwoD() const { 919 if (Kind != k_VectorList) return false; 920 return VectorList.Count == 2; 921 } 922 923 bool isVecListThreeD() const { 924 if (Kind != k_VectorList) return false; 925 return VectorList.Count == 3; 926 } 927 928 bool isVecListFourD() const { 929 if (Kind != k_VectorList) return false; 930 return VectorList.Count == 4; 931 } 932 933 bool isVectorIndex8() const { 934 if (Kind != k_VectorIndex) return false; 935 return VectorIndex.Val < 8; 936 } 937 bool isVectorIndex16() const { 938 if (Kind != k_VectorIndex) return false; 939 return VectorIndex.Val < 4; 940 } 941 bool isVectorIndex32() const { 942 if (Kind != k_VectorIndex) return false; 943 return VectorIndex.Val < 2; 944 } 945 946 bool isNEONi8splat() const { 947 if (Kind != k_Immediate) 948 return false; 949 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 950 // Must be a constant. 951 if (!CE) return false; 952 int64_t Value = CE->getValue(); 953 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 954 // value. 955 return Value >= 0 && Value < 256; 956 } 957 958 bool isNEONi16splat() const { 959 if (Kind != k_Immediate) 960 return false; 961 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 962 // Must be a constant. 963 if (!CE) return false; 964 int64_t Value = CE->getValue(); 965 // i16 value in the range [0,255] or [0x0100, 0xff00] 966 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 967 } 968 969 bool isNEONi32splat() const { 970 if (Kind != k_Immediate) 971 return false; 972 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 973 // Must be a constant. 974 if (!CE) return false; 975 int64_t Value = CE->getValue(); 976 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 
977 return (Value >= 0 && Value < 256) || 978 (Value >= 0x0100 && Value <= 0xff00) || 979 (Value >= 0x010000 && Value <= 0xff0000) || 980 (Value >= 0x01000000 && Value <= 0xff000000); 981 } 982 983 bool isNEONi32vmov() const { 984 if (Kind != k_Immediate) 985 return false; 986 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 987 // Must be a constant. 988 if (!CE) return false; 989 int64_t Value = CE->getValue(); 990 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 991 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 992 return (Value >= 0 && Value < 256) || 993 (Value >= 0x0100 && Value <= 0xff00) || 994 (Value >= 0x010000 && Value <= 0xff0000) || 995 (Value >= 0x01000000 && Value <= 0xff000000) || 996 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 997 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 998 } 999 1000 bool isNEONi64splat() const { 1001 if (Kind != k_Immediate) 1002 return false; 1003 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1004 // Must be a constant. 1005 if (!CE) return false; 1006 uint64_t Value = CE->getValue(); 1007 // i64 value with each byte being either 0 or 0xff. 1008 for (unsigned i = 0; i < 8; ++i) 1009 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1010 return true; 1011 } 1012 1013 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1014 // Add as immediates when possible. Null MCExpr = 0. 1015 if (Expr == 0) 1016 Inst.addOperand(MCOperand::CreateImm(0)); 1017 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1018 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1019 else 1020 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1021 } 1022 1023 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1024 assert(N == 2 && "Invalid number of operands!"); 1025 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1026 unsigned RegNum = getCondCode() == ARMCC::AL ? 
      0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  // Coprocessor number/register operands are carried as plain immediates.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register shifted by register: source reg, shift reg, and the shift
  // opcode/amount packed into one immediate by getSORegOpc.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: source reg plus packed shift opc/amount.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  // Bit 5 distinguishes ASR from LSL in the shifter-immediate encoding.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  // Register lists expand to one register operand per list entry.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // Shift-left then logical-shift-right isolates exactly [lsb, lsb+width);
    // the complement clears those bits and sets all others (BFC/BFI mask).
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  // Plain immediate adders: ranges were validated by the matching is*()
  // predicate, so these just forward the expression.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm24bitOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ?
      0 : Val));
  }

  void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  // Memory operand with no offset: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  // Addressing mode 2: base reg, offset reg, and a packed immediate holding
  // add/sub flag, offset magnitude, and shift information.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // AM2 pure-immediate offset (no base): register operand 0 plus packed
  // add/sub + magnitude.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Addressing mode 3: same shape as AM2 but packed via getAM3Opc (no shift).
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // AM3 offset: either a post-index register or a constant offset.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Addressing mode 5 (VFP load/store): offset is in words.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ?
      Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Positive/negative 8-bit offsets share the generic encoding above;
  // the sign was already validated during matching.
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Table-branch (TBB/TBH) operands: base plus index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                                     Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Thumb reg+imm forms: the offset is stored scaled by the access size
  // (4, 2, or 1 bytes).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed 8-bit offset: magnitude in the low bits, add/sub flag
  // in bit 8; INT32_MIN encodes #-0.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ?
           -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOneDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListThreeDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListFourDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // Values >= 256 have their payload in the high byte (checked by
    // isNEONi16splat); the 0xa00/0x800 bits select which byte is set.
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // Shift the single set byte down and tag which byte position it was in.
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // The 0xc00/0xd00 tags mark the "byte followed by ones" (00Xf/0Xff)
    // VMOV-only forms; otherwise this matches addNEONi32splatOperands.
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    // Each byte of the i64 is either 0 or 0xff (the isNEONi64splat
    // predicate range), so one bit per byte suffices to encode it.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory methods: each allocates an ARMOperand of the matching kind,
  // fills in its payload, and records the source range for diagnostics.
  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Tokens hold a pointer/length into the lexed buffer; the string is not
  // copied, so the source must outlive the operand.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // The list kind is refined from the class of the first register; the
  // registers are stored sorted.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Ctx is unreferenced in the body; kept for signature compatibility
  // with callers.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Ctx is unreferenced in the body; kept for signature compatibility
  // with callers.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.
1861 1862void ARMOperand::print(raw_ostream &OS) const { 1863 switch (Kind) { 1864 case k_FPImmediate: 1865 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1866 << ") >"; 1867 break; 1868 case k_CondCode: 1869 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1870 break; 1871 case k_CCOut: 1872 OS << "<ccout " << getReg() << ">"; 1873 break; 1874 case k_ITCondMask: { 1875 static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)", 1876 "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)", 1877 "(tee)", "(eee)" }; 1878 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1879 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1880 break; 1881 } 1882 case k_CoprocNum: 1883 OS << "<coprocessor number: " << getCoproc() << ">"; 1884 break; 1885 case k_CoprocReg: 1886 OS << "<coprocessor register: " << getCoproc() << ">"; 1887 break; 1888 case k_CoprocOption: 1889 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1890 break; 1891 case k_MSRMask: 1892 OS << "<mask: " << getMSRMask() << ">"; 1893 break; 1894 case k_Immediate: 1895 getImm()->print(OS); 1896 break; 1897 case k_MemBarrierOpt: 1898 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1899 break; 1900 case k_Memory: 1901 OS << "<memory " 1902 << " base:" << Memory.BaseRegNum; 1903 OS << ">"; 1904 break; 1905 case k_PostIndexRegister: 1906 OS << "post-idx register " << (PostIdxReg.isAdd ? 
"" : "-") 1907 << PostIdxReg.RegNum; 1908 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1909 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1910 << PostIdxReg.ShiftImm; 1911 OS << ">"; 1912 break; 1913 case k_ProcIFlags: { 1914 OS << "<ARM_PROC::"; 1915 unsigned IFlags = getProcIFlags(); 1916 for (int i=2; i >= 0; --i) 1917 if (IFlags & (1 << i)) 1918 OS << ARM_PROC::IFlagsToString(1 << i); 1919 OS << ">"; 1920 break; 1921 } 1922 case k_Register: 1923 OS << "<register " << getReg() << ">"; 1924 break; 1925 case k_ShifterImmediate: 1926 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 1927 << " #" << ShifterImm.Imm << ">"; 1928 break; 1929 case k_ShiftedRegister: 1930 OS << "<so_reg_reg " 1931 << RegShiftedReg.SrcReg 1932 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1933 << ", " << RegShiftedReg.ShiftReg << ", " 1934 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1935 << ">"; 1936 break; 1937 case k_ShiftedImmediate: 1938 OS << "<so_reg_imm " 1939 << RegShiftedImm.SrcReg 1940 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1941 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1942 << ">"; 1943 break; 1944 case k_RotateImmediate: 1945 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1946 break; 1947 case k_BitfieldDescriptor: 1948 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1949 << ", width: " << Bitfield.Width << ">"; 1950 break; 1951 case k_RegisterList: 1952 case k_DPRRegisterList: 1953 case k_SPRRegisterList: { 1954 OS << "<register_list "; 1955 1956 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1957 for (SmallVectorImpl<unsigned>::const_iterator 1958 I = RegList.begin(), E = RegList.end(); I != E; ) { 1959 OS << *I; 1960 if (++I < E) OS << ", "; 1961 } 1962 1963 OS << ">"; 1964 break; 1965 } 1966 case k_VectorList: 1967 OS << "<vector_list " << VectorList.Count << " * " 1968 << VectorList.RegNum << ">"; 1969 break; 1970 case k_Token: 1971 OS << "'" << getToken() << "'"; 1972 
break; 1973 case k_VectorIndex: 1974 OS << "<vectorindex " << getVectorIndex() << ">"; 1975 break; 1976 } 1977} 1978 1979/// @name Auto-generated Match Functions 1980/// { 1981 1982static unsigned MatchRegisterName(StringRef Name); 1983 1984/// } 1985 1986bool ARMAsmParser::ParseRegister(unsigned &RegNo, 1987 SMLoc &StartLoc, SMLoc &EndLoc) { 1988 RegNo = tryParseRegister(); 1989 1990 return (RegNo == (unsigned)-1); 1991} 1992 1993/// Try to parse a register name. The token must be an Identifier when called, 1994/// and if it is a register name the token is eaten and the register number is 1995/// returned. Otherwise return -1. 1996/// 1997int ARMAsmParser::tryParseRegister() { 1998 const AsmToken &Tok = Parser.getTok(); 1999 if (Tok.isNot(AsmToken::Identifier)) return -1; 2000 2001 // FIXME: Validate register for the current architecture; we have to do 2002 // validation later, so maybe there is no need for this here. 2003 std::string upperCase = Tok.getString().str(); 2004 std::string lowerCase = LowercaseString(upperCase); 2005 unsigned RegNum = MatchRegisterName(lowerCase); 2006 if (!RegNum) { 2007 RegNum = StringSwitch<unsigned>(lowerCase) 2008 .Case("r13", ARM::SP) 2009 .Case("r14", ARM::LR) 2010 .Case("r15", ARM::PC) 2011 .Case("ip", ARM::R12) 2012 .Default(0); 2013 } 2014 if (!RegNum) return -1; 2015 2016 Parser.Lex(); // Eat identifier token. 2017 2018 return RegNum; 2019} 2020 2021// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2022// If a recoverable error occurs, return 1. If an irrecoverable error 2023// occurs, return -1. An irrecoverable error is one where tokens have been 2024// consumed in the process of trying to parse the shifter (i.e., when it is 2025// indeed a shifter operand, but malformed). 
2026int ARMAsmParser::tryParseShiftRegister( 2027 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2028 SMLoc S = Parser.getTok().getLoc(); 2029 const AsmToken &Tok = Parser.getTok(); 2030 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2031 2032 std::string upperCase = Tok.getString().str(); 2033 std::string lowerCase = LowercaseString(upperCase); 2034 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 2035 .Case("lsl", ARM_AM::lsl) 2036 .Case("lsr", ARM_AM::lsr) 2037 .Case("asr", ARM_AM::asr) 2038 .Case("ror", ARM_AM::ror) 2039 .Case("rrx", ARM_AM::rrx) 2040 .Default(ARM_AM::no_shift); 2041 2042 if (ShiftTy == ARM_AM::no_shift) 2043 return 1; 2044 2045 Parser.Lex(); // Eat the operator. 2046 2047 // The source register for the shift has already been added to the 2048 // operand list, so we need to pop it off and combine it into the shifted 2049 // register operand instead. 2050 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val()); 2051 if (!PrevOp->isReg()) 2052 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 2053 int SrcReg = PrevOp->getReg(); 2054 int64_t Imm = 0; 2055 int ShiftReg = 0; 2056 if (ShiftTy == ARM_AM::rrx) { 2057 // RRX Doesn't have an explicit shift amount. The encoder expects 2058 // the shift register to be the same as the source register. Seems odd, 2059 // but OK. 2060 ShiftReg = SrcReg; 2061 } else { 2062 // Figure out if this is shifted by a constant or a register (for non-RRX). 2063 if (Parser.getTok().is(AsmToken::Hash)) { 2064 Parser.Lex(); // Eat hash. 2065 SMLoc ImmLoc = Parser.getTok().getLoc(); 2066 const MCExpr *ShiftExpr = 0; 2067 if (getParser().ParseExpression(ShiftExpr)) { 2068 Error(ImmLoc, "invalid immediate shift value"); 2069 return -1; 2070 } 2071 // The expression must be evaluatable as an immediate. 
2072 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 2073 if (!CE) { 2074 Error(ImmLoc, "invalid immediate shift value"); 2075 return -1; 2076 } 2077 // Range check the immediate. 2078 // lsl, ror: 0 <= imm <= 31 2079 // lsr, asr: 0 <= imm <= 32 2080 Imm = CE->getValue(); 2081 if (Imm < 0 || 2082 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 2083 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 2084 Error(ImmLoc, "immediate shift value out of range"); 2085 return -1; 2086 } 2087 } else if (Parser.getTok().is(AsmToken::Identifier)) { 2088 ShiftReg = tryParseRegister(); 2089 SMLoc L = Parser.getTok().getLoc(); 2090 if (ShiftReg == -1) { 2091 Error (L, "expected immediate or register in shift operand"); 2092 return -1; 2093 } 2094 } else { 2095 Error (Parser.getTok().getLoc(), 2096 "expected immediate or register in shift operand"); 2097 return -1; 2098 } 2099 } 2100 2101 if (ShiftReg && ShiftTy != ARM_AM::rrx) 2102 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 2103 ShiftReg, Imm, 2104 S, Parser.getTok().getLoc())); 2105 else 2106 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 2107 S, Parser.getTok().getLoc())); 2108 2109 return 0; 2110} 2111 2112 2113/// Try to parse a register name. The token must be an Identifier when called. 2114/// If it's a register, an AsmOperand is created. Another AsmOperand is created 2115/// if there is a "writeback". 'true' if it's not a register. 2116/// 2117/// TODO this is likely to change to allow different register types and or to 2118/// parse for a specific register type. 
2119bool ARMAsmParser:: 2120tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2121 SMLoc S = Parser.getTok().getLoc(); 2122 int RegNo = tryParseRegister(); 2123 if (RegNo == -1) 2124 return true; 2125 2126 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2127 2128 const AsmToken &ExclaimTok = Parser.getTok(); 2129 if (ExclaimTok.is(AsmToken::Exclaim)) { 2130 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2131 ExclaimTok.getLoc())); 2132 Parser.Lex(); // Eat exclaim token 2133 return false; 2134 } 2135 2136 // Also check for an index operand. This is only legal for vector registers, 2137 // but that'll get caught OK in operand matching, so we don't need to 2138 // explicitly filter everything else out here. 2139 if (Parser.getTok().is(AsmToken::LBrac)) { 2140 SMLoc SIdx = Parser.getTok().getLoc(); 2141 Parser.Lex(); // Eat left bracket token. 2142 2143 const MCExpr *ImmVal; 2144 if (getParser().ParseExpression(ImmVal)) 2145 return MatchOperand_ParseFail; 2146 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2147 if (!MCE) { 2148 TokError("immediate value expected for vector index"); 2149 return MatchOperand_ParseFail; 2150 } 2151 2152 SMLoc E = Parser.getTok().getLoc(); 2153 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2154 Error(E, "']' expected"); 2155 return MatchOperand_ParseFail; 2156 } 2157 2158 Parser.Lex(); // Eat right bracket token. 2159 2160 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2161 SIdx, E, 2162 getContext())); 2163 } 2164 2165 return false; 2166} 2167 2168/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2169/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2170/// "c5", ... 2171static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2172 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2173 // but efficient. 
2174 switch (Name.size()) { 2175 default: break; 2176 case 2: 2177 if (Name[0] != CoprocOp) 2178 return -1; 2179 switch (Name[1]) { 2180 default: return -1; 2181 case '0': return 0; 2182 case '1': return 1; 2183 case '2': return 2; 2184 case '3': return 3; 2185 case '4': return 4; 2186 case '5': return 5; 2187 case '6': return 6; 2188 case '7': return 7; 2189 case '8': return 8; 2190 case '9': return 9; 2191 } 2192 break; 2193 case 3: 2194 if (Name[0] != CoprocOp || Name[1] != '1') 2195 return -1; 2196 switch (Name[2]) { 2197 default: return -1; 2198 case '0': return 10; 2199 case '1': return 11; 2200 case '2': return 12; 2201 case '3': return 13; 2202 case '4': return 14; 2203 case '5': return 15; 2204 } 2205 break; 2206 } 2207 2208 return -1; 2209} 2210 2211/// parseITCondCode - Try to parse a condition code for an IT instruction. 2212ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2213parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2214 SMLoc S = Parser.getTok().getLoc(); 2215 const AsmToken &Tok = Parser.getTok(); 2216 if (!Tok.is(AsmToken::Identifier)) 2217 return MatchOperand_NoMatch; 2218 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2219 .Case("eq", ARMCC::EQ) 2220 .Case("ne", ARMCC::NE) 2221 .Case("hs", ARMCC::HS) 2222 .Case("cs", ARMCC::HS) 2223 .Case("lo", ARMCC::LO) 2224 .Case("cc", ARMCC::LO) 2225 .Case("mi", ARMCC::MI) 2226 .Case("pl", ARMCC::PL) 2227 .Case("vs", ARMCC::VS) 2228 .Case("vc", ARMCC::VC) 2229 .Case("hi", ARMCC::HI) 2230 .Case("ls", ARMCC::LS) 2231 .Case("ge", ARMCC::GE) 2232 .Case("lt", ARMCC::LT) 2233 .Case("gt", ARMCC::GT) 2234 .Case("le", ARMCC::LE) 2235 .Case("al", ARMCC::AL) 2236 .Default(~0U); 2237 if (CC == ~0U) 2238 return MatchOperand_NoMatch; 2239 Parser.Lex(); // Eat the token. 2240 2241 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2242 2243 return MatchOperand_Success; 2244} 2245 2246/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2247/// token must be an Identifier when called, and if it is a coprocessor 2248/// number, the token is eaten and the operand is added to the operand list. 2249ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2250parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2251 SMLoc S = Parser.getTok().getLoc(); 2252 const AsmToken &Tok = Parser.getTok(); 2253 if (Tok.isNot(AsmToken::Identifier)) 2254 return MatchOperand_NoMatch; 2255 2256 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2257 if (Num == -1) 2258 return MatchOperand_NoMatch; 2259 2260 Parser.Lex(); // Eat identifier token. 2261 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2262 return MatchOperand_Success; 2263} 2264 2265/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2266/// token must be an Identifier when called, and if it is a coprocessor 2267/// number, the token is eaten and the operand is added to the operand list. 2268ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2269parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2270 SMLoc S = Parser.getTok().getLoc(); 2271 const AsmToken &Tok = Parser.getTok(); 2272 if (Tok.isNot(AsmToken::Identifier)) 2273 return MatchOperand_NoMatch; 2274 2275 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2276 if (Reg == -1) 2277 return MatchOperand_NoMatch; 2278 2279 Parser.Lex(); // Eat identifier token. 2280 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2281 return MatchOperand_Success; 2282} 2283 2284/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2285/// coproc_option : '{' imm0_255 '}' 2286ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2287parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2288 SMLoc S = Parser.getTok().getLoc(); 2289 2290 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2291 if (Parser.getTok().isNot(AsmToken::LCurly)) 2292 return MatchOperand_NoMatch; 2293 Parser.Lex(); // Eat the '{' 2294 2295 const MCExpr *Expr; 2296 SMLoc Loc = Parser.getTok().getLoc(); 2297 if (getParser().ParseExpression(Expr)) { 2298 Error(Loc, "illegal expression"); 2299 return MatchOperand_ParseFail; 2300 } 2301 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2302 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2303 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2304 return MatchOperand_ParseFail; 2305 } 2306 int Val = CE->getValue(); 2307 2308 // Check for and consume the closing '}' 2309 if (Parser.getTok().isNot(AsmToken::RCurly)) 2310 return MatchOperand_ParseFail; 2311 SMLoc E = Parser.getTok().getLoc(); 2312 Parser.Lex(); // Eat the '}' 2313 2314 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2315 return MatchOperand_Success; 2316} 2317 2318// For register list parsing, we need to map from raw GPR register numbering 2319// to the enumeration values. The enumeration values aren't sorted by 2320// register number due to our using "sp", "lr" and "pc" as canonical names. 2321static unsigned getNextRegister(unsigned Reg) { 2322 // If this is a GPR, we need to do it manually, otherwise we can rely 2323 // on the sort ordering of the enumeration since the other reg-classes 2324 // are sane. 
2325 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2326 return Reg + 1; 2327 switch(Reg) { 2328 default: assert(0 && "Invalid GPR number!"); 2329 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2330 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2331 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2332 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2333 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2334 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2335 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2336 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2337 } 2338} 2339 2340/// Parse a register list. 2341bool ARMAsmParser:: 2342parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2343 assert(Parser.getTok().is(AsmToken::LCurly) && 2344 "Token is not a Left Curly Brace"); 2345 SMLoc S = Parser.getTok().getLoc(); 2346 Parser.Lex(); // Eat '{' token. 2347 SMLoc RegLoc = Parser.getTok().getLoc(); 2348 2349 // Check the first register in the list to see what register class 2350 // this is a list of. 2351 int Reg = tryParseRegister(); 2352 if (Reg == -1) 2353 return Error(RegLoc, "register expected"); 2354 2355 MCRegisterClass *RC; 2356 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2357 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2358 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2359 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2360 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2361 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2362 else 2363 return Error(RegLoc, "invalid register in register list"); 2364 2365 // The reglist instructions have at most 16 registers, so reserve 2366 // space for that many. 2367 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2368 // Store the first register. 
2369 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2370 2371 // This starts immediately after the first register token in the list, 2372 // so we can see either a comma or a minus (range separator) as a legal 2373 // next token. 2374 while (Parser.getTok().is(AsmToken::Comma) || 2375 Parser.getTok().is(AsmToken::Minus)) { 2376 if (Parser.getTok().is(AsmToken::Minus)) { 2377 Parser.Lex(); // Eat the comma. 2378 SMLoc EndLoc = Parser.getTok().getLoc(); 2379 int EndReg = tryParseRegister(); 2380 if (EndReg == -1) 2381 return Error(EndLoc, "register expected"); 2382 // If the register is the same as the start reg, there's nothing 2383 // more to do. 2384 if (Reg == EndReg) 2385 continue; 2386 // The register must be in the same register class as the first. 2387 if (!RC->contains(EndReg)) 2388 return Error(EndLoc, "invalid register in register list"); 2389 // Ranges must go from low to high. 2390 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2391 return Error(EndLoc, "bad range in register list"); 2392 2393 // Add all the registers in the range to the register list. 2394 while (Reg != EndReg) { 2395 Reg = getNextRegister(Reg); 2396 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2397 } 2398 continue; 2399 } 2400 Parser.Lex(); // Eat the comma. 2401 RegLoc = Parser.getTok().getLoc(); 2402 int OldReg = Reg; 2403 Reg = tryParseRegister(); 2404 if (Reg == -1) 2405 return Error(RegLoc, "register expected"); 2406 // The register must be in the same register class as the first. 2407 if (!RC->contains(Reg)) 2408 return Error(RegLoc, "invalid register in register list"); 2409 // List must be monotonically increasing. 2410 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2411 return Error(RegLoc, "register list not in ascending order"); 2412 // VFP register lists must also be contiguous. 
2413 // It's OK to use the enumeration values directly here rather, as the 2414 // VFP register classes have the enum sorted properly. 2415 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2416 Reg != OldReg + 1) 2417 return Error(RegLoc, "non-contiguous register range"); 2418 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2419 } 2420 2421 SMLoc E = Parser.getTok().getLoc(); 2422 if (Parser.getTok().isNot(AsmToken::RCurly)) 2423 return Error(E, "'}' expected"); 2424 Parser.Lex(); // Eat '}' token. 2425 2426 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2427 return false; 2428} 2429 2430// parse a vector register list 2431ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2432parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2433 if(Parser.getTok().isNot(AsmToken::LCurly)) 2434 return MatchOperand_NoMatch; 2435 2436 SMLoc S = Parser.getTok().getLoc(); 2437 Parser.Lex(); // Eat '{' token. 2438 SMLoc RegLoc = Parser.getTok().getLoc(); 2439 2440 int Reg = tryParseRegister(); 2441 if (Reg == -1) { 2442 Error(RegLoc, "register expected"); 2443 return MatchOperand_ParseFail; 2444 } 2445 2446 unsigned FirstReg = Reg; 2447 unsigned Count = 1; 2448 while (Parser.getTok().is(AsmToken::Comma)) { 2449 Parser.Lex(); // Eat the comma. 2450 RegLoc = Parser.getTok().getLoc(); 2451 int OldReg = Reg; 2452 Reg = tryParseRegister(); 2453 if (Reg == -1) { 2454 Error(RegLoc, "register expected"); 2455 return MatchOperand_ParseFail; 2456 } 2457 // vector register lists must also be contiguous. 2458 // It's OK to use the enumeration values directly here rather, as the 2459 // VFP register classes have the enum sorted properly. 
2460 if (Reg != OldReg + 1) { 2461 Error(RegLoc, "non-contiguous register range"); 2462 return MatchOperand_ParseFail; 2463 } 2464 2465 ++Count; 2466 } 2467 2468 SMLoc E = Parser.getTok().getLoc(); 2469 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2470 Error(E, "'}' expected"); 2471 return MatchOperand_ParseFail; 2472 } 2473 Parser.Lex(); // Eat '}' token. 2474 2475 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2476 return MatchOperand_Success; 2477} 2478 2479/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2480ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2481parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2482 SMLoc S = Parser.getTok().getLoc(); 2483 const AsmToken &Tok = Parser.getTok(); 2484 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2485 StringRef OptStr = Tok.getString(); 2486 2487 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2488 .Case("sy", ARM_MB::SY) 2489 .Case("st", ARM_MB::ST) 2490 .Case("sh", ARM_MB::ISH) 2491 .Case("ish", ARM_MB::ISH) 2492 .Case("shst", ARM_MB::ISHST) 2493 .Case("ishst", ARM_MB::ISHST) 2494 .Case("nsh", ARM_MB::NSH) 2495 .Case("un", ARM_MB::NSH) 2496 .Case("nshst", ARM_MB::NSHST) 2497 .Case("unst", ARM_MB::NSHST) 2498 .Case("osh", ARM_MB::OSH) 2499 .Case("oshst", ARM_MB::OSHST) 2500 .Default(~0U); 2501 2502 if (Opt == ~0U) 2503 return MatchOperand_NoMatch; 2504 2505 Parser.Lex(); // Eat identifier token. 2506 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2507 return MatchOperand_Success; 2508} 2509 2510/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 
2511ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2512parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2513 SMLoc S = Parser.getTok().getLoc(); 2514 const AsmToken &Tok = Parser.getTok(); 2515 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2516 StringRef IFlagsStr = Tok.getString(); 2517 2518 // An iflags string of "none" is interpreted to mean that none of the AIF 2519 // bits are set. Not a terribly useful instruction, but a valid encoding. 2520 unsigned IFlags = 0; 2521 if (IFlagsStr != "none") { 2522 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2523 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2524 .Case("a", ARM_PROC::A) 2525 .Case("i", ARM_PROC::I) 2526 .Case("f", ARM_PROC::F) 2527 .Default(~0U); 2528 2529 // If some specific iflag is already set, it means that some letter is 2530 // present more than once, this is not acceptable. 2531 if (Flag == ~0U || (IFlags & Flag)) 2532 return MatchOperand_NoMatch; 2533 2534 IFlags |= Flag; 2535 } 2536 } 2537 2538 Parser.Lex(); // Eat identifier token. 2539 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2540 return MatchOperand_Success; 2541} 2542 2543/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2544ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2545parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2546 SMLoc S = Parser.getTok().getLoc(); 2547 const AsmToken &Tok = Parser.getTok(); 2548 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2549 StringRef Mask = Tok.getString(); 2550 2551 if (isMClass()) { 2552 // See ARMv6-M 10.1.1 2553 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2554 .Case("apsr", 0) 2555 .Case("iapsr", 1) 2556 .Case("eapsr", 2) 2557 .Case("xpsr", 3) 2558 .Case("ipsr", 5) 2559 .Case("epsr", 6) 2560 .Case("iepsr", 7) 2561 .Case("msp", 8) 2562 .Case("psp", 9) 2563 .Case("primask", 16) 2564 .Case("basepri", 17) 2565 .Case("basepri_max", 18) 2566 .Case("faultmask", 19) 2567 .Case("control", 20) 2568 .Default(~0U); 2569 2570 if (FlagsVal == ~0U) 2571 return MatchOperand_NoMatch; 2572 2573 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2574 // basepri, basepri_max and faultmask only valid for V7m. 2575 return MatchOperand_NoMatch; 2576 2577 Parser.Lex(); // Eat identifier token. 
2578 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2579 return MatchOperand_Success; 2580 } 2581 2582 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2583 size_t Start = 0, Next = Mask.find('_'); 2584 StringRef Flags = ""; 2585 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2586 if (Next != StringRef::npos) 2587 Flags = Mask.slice(Next+1, Mask.size()); 2588 2589 // FlagsVal contains the complete mask: 2590 // 3-0: Mask 2591 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2592 unsigned FlagsVal = 0; 2593 2594 if (SpecReg == "apsr") { 2595 FlagsVal = StringSwitch<unsigned>(Flags) 2596 .Case("nzcvq", 0x8) // same as CPSR_f 2597 .Case("g", 0x4) // same as CPSR_s 2598 .Case("nzcvqg", 0xc) // same as CPSR_fs 2599 .Default(~0U); 2600 2601 if (FlagsVal == ~0U) { 2602 if (!Flags.empty()) 2603 return MatchOperand_NoMatch; 2604 else 2605 FlagsVal = 8; // No flag 2606 } 2607 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2608 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2609 Flags = "fc"; 2610 for (int i = 0, e = Flags.size(); i != e; ++i) { 2611 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2612 .Case("c", 1) 2613 .Case("x", 2) 2614 .Case("s", 4) 2615 .Case("f", 8) 2616 .Default(~0U); 2617 2618 // If some specific flag is already set, it means that some letter is 2619 // present more than once, this is not acceptable. 2620 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2621 return MatchOperand_NoMatch; 2622 FlagsVal |= Flag; 2623 } 2624 } else // No match for special register. 2625 return MatchOperand_NoMatch; 2626 2627 // Special register without flags is NOT equivalent to "fc" flags. 2628 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2629 // two lines would enable gas compatibility at the expense of breaking 2630 // round-tripping. 
2631 // 2632 // if (!FlagsVal) 2633 // FlagsVal = 0x9; 2634 2635 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2636 if (SpecReg == "spsr") 2637 FlagsVal |= 16; 2638 2639 Parser.Lex(); // Eat identifier token. 2640 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2641 return MatchOperand_Success; 2642} 2643 2644ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2645parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2646 int Low, int High) { 2647 const AsmToken &Tok = Parser.getTok(); 2648 if (Tok.isNot(AsmToken::Identifier)) { 2649 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2650 return MatchOperand_ParseFail; 2651 } 2652 StringRef ShiftName = Tok.getString(); 2653 std::string LowerOp = LowercaseString(Op); 2654 std::string UpperOp = UppercaseString(Op); 2655 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2656 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2657 return MatchOperand_ParseFail; 2658 } 2659 Parser.Lex(); // Eat shift type token. 2660 2661 // There must be a '#' and a shift amount. 2662 if (Parser.getTok().isNot(AsmToken::Hash)) { 2663 Error(Parser.getTok().getLoc(), "'#' expected"); 2664 return MatchOperand_ParseFail; 2665 } 2666 Parser.Lex(); // Eat hash token. 
2667 2668 const MCExpr *ShiftAmount; 2669 SMLoc Loc = Parser.getTok().getLoc(); 2670 if (getParser().ParseExpression(ShiftAmount)) { 2671 Error(Loc, "illegal expression"); 2672 return MatchOperand_ParseFail; 2673 } 2674 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2675 if (!CE) { 2676 Error(Loc, "constant expression expected"); 2677 return MatchOperand_ParseFail; 2678 } 2679 int Val = CE->getValue(); 2680 if (Val < Low || Val > High) { 2681 Error(Loc, "immediate value out of range"); 2682 return MatchOperand_ParseFail; 2683 } 2684 2685 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2686 2687 return MatchOperand_Success; 2688} 2689 2690ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2691parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2692 const AsmToken &Tok = Parser.getTok(); 2693 SMLoc S = Tok.getLoc(); 2694 if (Tok.isNot(AsmToken::Identifier)) { 2695 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2696 return MatchOperand_ParseFail; 2697 } 2698 int Val = StringSwitch<int>(Tok.getString()) 2699 .Case("be", 1) 2700 .Case("le", 0) 2701 .Default(-1); 2702 Parser.Lex(); // Eat the token. 2703 2704 if (Val == -1) { 2705 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2706 return MatchOperand_ParseFail; 2707 } 2708 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2709 getContext()), 2710 S, Parser.getTok().getLoc())); 2711 return MatchOperand_Success; 2712} 2713 2714/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2715/// instructions. Legal values are: 2716/// lsl #n 'n' in [0,31] 2717/// asr #n 'n' in [1,32] 2718/// n == 32 encoded as n == 0. 
2719ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2720parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2721 const AsmToken &Tok = Parser.getTok(); 2722 SMLoc S = Tok.getLoc(); 2723 if (Tok.isNot(AsmToken::Identifier)) { 2724 Error(S, "shift operator 'asr' or 'lsl' expected"); 2725 return MatchOperand_ParseFail; 2726 } 2727 StringRef ShiftName = Tok.getString(); 2728 bool isASR; 2729 if (ShiftName == "lsl" || ShiftName == "LSL") 2730 isASR = false; 2731 else if (ShiftName == "asr" || ShiftName == "ASR") 2732 isASR = true; 2733 else { 2734 Error(S, "shift operator 'asr' or 'lsl' expected"); 2735 return MatchOperand_ParseFail; 2736 } 2737 Parser.Lex(); // Eat the operator. 2738 2739 // A '#' and a shift amount. 2740 if (Parser.getTok().isNot(AsmToken::Hash)) { 2741 Error(Parser.getTok().getLoc(), "'#' expected"); 2742 return MatchOperand_ParseFail; 2743 } 2744 Parser.Lex(); // Eat hash token. 2745 2746 const MCExpr *ShiftAmount; 2747 SMLoc E = Parser.getTok().getLoc(); 2748 if (getParser().ParseExpression(ShiftAmount)) { 2749 Error(E, "malformed shift expression"); 2750 return MatchOperand_ParseFail; 2751 } 2752 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2753 if (!CE) { 2754 Error(E, "shift amount must be an immediate"); 2755 return MatchOperand_ParseFail; 2756 } 2757 2758 int64_t Val = CE->getValue(); 2759 if (isASR) { 2760 // Shift amount must be in [1,32] 2761 if (Val < 1 || Val > 32) { 2762 Error(E, "'asr' shift amount must be in range [1,32]"); 2763 return MatchOperand_ParseFail; 2764 } 2765 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2766 if (isThumb() && Val == 32) { 2767 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2768 return MatchOperand_ParseFail; 2769 } 2770 if (Val == 32) Val = 0; 2771 } else { 2772 // Shift amount must be in [1,32] 2773 if (Val < 0 || Val > 31) { 2774 Error(E, "'lsr' shift amount must be in range [0,31]"); 2775 return MatchOperand_ParseFail; 2776 } 2777 } 2778 2779 E = Parser.getTok().getLoc(); 2780 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2781 2782 return MatchOperand_Success; 2783} 2784 2785/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2786/// of instructions. Legal values are: 2787/// ror #n 'n' in {0, 8, 16, 24} 2788ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2789parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2790 const AsmToken &Tok = Parser.getTok(); 2791 SMLoc S = Tok.getLoc(); 2792 if (Tok.isNot(AsmToken::Identifier)) 2793 return MatchOperand_NoMatch; 2794 StringRef ShiftName = Tok.getString(); 2795 if (ShiftName != "ror" && ShiftName != "ROR") 2796 return MatchOperand_NoMatch; 2797 Parser.Lex(); // Eat the operator. 2798 2799 // A '#' and a rotate amount. 2800 if (Parser.getTok().isNot(AsmToken::Hash)) { 2801 Error(Parser.getTok().getLoc(), "'#' expected"); 2802 return MatchOperand_ParseFail; 2803 } 2804 Parser.Lex(); // Eat hash token. 2805 2806 const MCExpr *ShiftAmount; 2807 SMLoc E = Parser.getTok().getLoc(); 2808 if (getParser().ParseExpression(ShiftAmount)) { 2809 Error(E, "malformed rotate expression"); 2810 return MatchOperand_ParseFail; 2811 } 2812 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2813 if (!CE) { 2814 Error(E, "rotate amount must be an immediate"); 2815 return MatchOperand_ParseFail; 2816 } 2817 2818 int64_t Val = CE->getValue(); 2819 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2820 // normally, zero is represented in asm by omitting the rotate operand 2821 // entirely. 
2822 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2823 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2824 return MatchOperand_ParseFail; 2825 } 2826 2827 E = Parser.getTok().getLoc(); 2828 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2829 2830 return MatchOperand_Success; 2831} 2832 2833ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2834parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2835 SMLoc S = Parser.getTok().getLoc(); 2836 // The bitfield descriptor is really two operands, the LSB and the width. 2837 if (Parser.getTok().isNot(AsmToken::Hash)) { 2838 Error(Parser.getTok().getLoc(), "'#' expected"); 2839 return MatchOperand_ParseFail; 2840 } 2841 Parser.Lex(); // Eat hash token. 2842 2843 const MCExpr *LSBExpr; 2844 SMLoc E = Parser.getTok().getLoc(); 2845 if (getParser().ParseExpression(LSBExpr)) { 2846 Error(E, "malformed immediate expression"); 2847 return MatchOperand_ParseFail; 2848 } 2849 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2850 if (!CE) { 2851 Error(E, "'lsb' operand must be an immediate"); 2852 return MatchOperand_ParseFail; 2853 } 2854 2855 int64_t LSB = CE->getValue(); 2856 // The LSB must be in the range [0,31] 2857 if (LSB < 0 || LSB > 31) { 2858 Error(E, "'lsb' operand must be in the range [0,31]"); 2859 return MatchOperand_ParseFail; 2860 } 2861 E = Parser.getTok().getLoc(); 2862 2863 // Expect another immediate operand. 2864 if (Parser.getTok().isNot(AsmToken::Comma)) { 2865 Error(Parser.getTok().getLoc(), "too few operands"); 2866 return MatchOperand_ParseFail; 2867 } 2868 Parser.Lex(); // Eat hash token. 2869 if (Parser.getTok().isNot(AsmToken::Hash)) { 2870 Error(Parser.getTok().getLoc(), "'#' expected"); 2871 return MatchOperand_ParseFail; 2872 } 2873 Parser.Lex(); // Eat hash token. 
2874 2875 const MCExpr *WidthExpr; 2876 if (getParser().ParseExpression(WidthExpr)) { 2877 Error(E, "malformed immediate expression"); 2878 return MatchOperand_ParseFail; 2879 } 2880 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2881 if (!CE) { 2882 Error(E, "'width' operand must be an immediate"); 2883 return MatchOperand_ParseFail; 2884 } 2885 2886 int64_t Width = CE->getValue(); 2887 // The LSB must be in the range [1,32-lsb] 2888 if (Width < 1 || Width > 32 - LSB) { 2889 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2890 return MatchOperand_ParseFail; 2891 } 2892 E = Parser.getTok().getLoc(); 2893 2894 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2895 2896 return MatchOperand_Success; 2897} 2898 2899ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2900parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2901 // Check for a post-index addressing register operand. Specifically: 2902 // postidx_reg := '+' register {, shift} 2903 // | '-' register {, shift} 2904 // | register {, shift} 2905 2906 // This method must return MatchOperand_NoMatch without consuming any tokens 2907 // in the case where there is no match, as other alternatives take other 2908 // parse methods. 2909 AsmToken Tok = Parser.getTok(); 2910 SMLoc S = Tok.getLoc(); 2911 bool haveEaten = false; 2912 bool isAdd = true; 2913 int Reg = -1; 2914 if (Tok.is(AsmToken::Plus)) { 2915 Parser.Lex(); // Eat the '+' token. 2916 haveEaten = true; 2917 } else if (Tok.is(AsmToken::Minus)) { 2918 Parser.Lex(); // Eat the '-' token. 
2919 isAdd = false; 2920 haveEaten = true; 2921 } 2922 if (Parser.getTok().is(AsmToken::Identifier)) 2923 Reg = tryParseRegister(); 2924 if (Reg == -1) { 2925 if (!haveEaten) 2926 return MatchOperand_NoMatch; 2927 Error(Parser.getTok().getLoc(), "register expected"); 2928 return MatchOperand_ParseFail; 2929 } 2930 SMLoc E = Parser.getTok().getLoc(); 2931 2932 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2933 unsigned ShiftImm = 0; 2934 if (Parser.getTok().is(AsmToken::Comma)) { 2935 Parser.Lex(); // Eat the ','. 2936 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2937 return MatchOperand_ParseFail; 2938 } 2939 2940 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2941 ShiftImm, S, E)); 2942 2943 return MatchOperand_Success; 2944} 2945 2946ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2947parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2948 // Check for a post-index addressing register operand. Specifically: 2949 // am3offset := '+' register 2950 // | '-' register 2951 // | register 2952 // | # imm 2953 // | # + imm 2954 // | # - imm 2955 2956 // This method must return MatchOperand_NoMatch without consuming any tokens 2957 // in the case where there is no match, as other alternatives take other 2958 // parse methods. 2959 AsmToken Tok = Parser.getTok(); 2960 SMLoc S = Tok.getLoc(); 2961 2962 // Do immediates first, as we always parse those if we have a '#'. 2963 if (Parser.getTok().is(AsmToken::Hash)) { 2964 Parser.Lex(); // Eat the '#'. 2965 // Explicitly look for a '-', as we need to encode negative zero 2966 // differently. 
2967 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2968 const MCExpr *Offset; 2969 if (getParser().ParseExpression(Offset)) 2970 return MatchOperand_ParseFail; 2971 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2972 if (!CE) { 2973 Error(S, "constant expression expected"); 2974 return MatchOperand_ParseFail; 2975 } 2976 SMLoc E = Tok.getLoc(); 2977 // Negative zero is encoded as the flag value INT32_MIN. 2978 int32_t Val = CE->getValue(); 2979 if (isNegative && Val == 0) 2980 Val = INT32_MIN; 2981 2982 Operands.push_back( 2983 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2984 2985 return MatchOperand_Success; 2986 } 2987 2988 2989 bool haveEaten = false; 2990 bool isAdd = true; 2991 int Reg = -1; 2992 if (Tok.is(AsmToken::Plus)) { 2993 Parser.Lex(); // Eat the '+' token. 2994 haveEaten = true; 2995 } else if (Tok.is(AsmToken::Minus)) { 2996 Parser.Lex(); // Eat the '-' token. 2997 isAdd = false; 2998 haveEaten = true; 2999 } 3000 if (Parser.getTok().is(AsmToken::Identifier)) 3001 Reg = tryParseRegister(); 3002 if (Reg == -1) { 3003 if (!haveEaten) 3004 return MatchOperand_NoMatch; 3005 Error(Parser.getTok().getLoc(), "register expected"); 3006 return MatchOperand_ParseFail; 3007 } 3008 SMLoc E = Parser.getTok().getLoc(); 3009 3010 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3011 0, S, E)); 3012 3013 return MatchOperand_Success; 3014} 3015 3016/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3017/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3018/// when they refer multiple MIOperands inside a single one. 3019bool ARMAsmParser:: 3020cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3021 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3022 // Rt, Rt2 3023 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3024 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3025 // Create a writeback register dummy placeholder. 
3026 Inst.addOperand(MCOperand::CreateReg(0)); 3027 // addr 3028 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3029 // pred 3030 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3031 return true; 3032} 3033 3034/// cvtT2StrdPre - Convert parsed operands to MCInst. 3035/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3036/// when they refer multiple MIOperands inside a single one. 3037bool ARMAsmParser:: 3038cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3039 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3040 // Create a writeback register dummy placeholder. 3041 Inst.addOperand(MCOperand::CreateReg(0)); 3042 // Rt, Rt2 3043 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3044 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3045 // addr 3046 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3047 // pred 3048 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3049 return true; 3050} 3051 3052/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3053/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3054/// when they refer multiple MIOperands inside a single one. 3055bool ARMAsmParser:: 3056cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3057 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3058 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3059 3060 // Create a writeback register dummy placeholder. 3061 Inst.addOperand(MCOperand::CreateImm(0)); 3062 3063 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3064 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3065 return true; 3066} 3067 3068/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3069/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3070/// when they refer multiple MIOperands inside a single one. 
3071bool ARMAsmParser:: 3072cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3073 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3074 // Create a writeback register dummy placeholder. 3075 Inst.addOperand(MCOperand::CreateImm(0)); 3076 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3077 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3078 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3079 return true; 3080} 3081 3082/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3083/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3084/// when they refer multiple MIOperands inside a single one. 3085bool ARMAsmParser:: 3086cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3087 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3088 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3089 3090 // Create a writeback register dummy placeholder. 3091 Inst.addOperand(MCOperand::CreateImm(0)); 3092 3093 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3094 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3095 return true; 3096} 3097 3098/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3099/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3100/// when they refer multiple MIOperands inside a single one. 3101bool ARMAsmParser:: 3102cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3103 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3104 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3105 3106 // Create a writeback register dummy placeholder. 3107 Inst.addOperand(MCOperand::CreateImm(0)); 3108 3109 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3110 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3111 return true; 3112} 3113 3114 3115/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3116/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3117/// when they refer multiple MIOperands inside a single one. 3118bool ARMAsmParser:: 3119cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3120 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3121 // Create a writeback register dummy placeholder. 3122 Inst.addOperand(MCOperand::CreateImm(0)); 3123 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3124 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3125 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3126 return true; 3127} 3128 3129/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3130/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3131/// when they refer multiple MIOperands inside a single one. 3132bool ARMAsmParser:: 3133cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3134 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3135 // Create a writeback register dummy placeholder. 3136 Inst.addOperand(MCOperand::CreateImm(0)); 3137 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3138 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3139 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3140 return true; 3141} 3142 3143/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3144/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3145/// when they refer multiple MIOperands inside a single one. 3146bool ARMAsmParser:: 3147cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3148 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3149 // Create a writeback register dummy placeholder. 
3150 Inst.addOperand(MCOperand::CreateImm(0)); 3151 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3152 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3153 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3154 return true; 3155} 3156 3157/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3158/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3159/// when they refer multiple MIOperands inside a single one. 3160bool ARMAsmParser:: 3161cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3162 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3163 // Rt 3164 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3165 // Create a writeback register dummy placeholder. 3166 Inst.addOperand(MCOperand::CreateImm(0)); 3167 // addr 3168 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3169 // offset 3170 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3171 // pred 3172 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3173 return true; 3174} 3175 3176/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3177/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3178/// when they refer multiple MIOperands inside a single one. 3179bool ARMAsmParser:: 3180cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3181 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3182 // Rt 3183 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3184 // Create a writeback register dummy placeholder. 3185 Inst.addOperand(MCOperand::CreateImm(0)); 3186 // addr 3187 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3188 // offset 3189 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3190 // pred 3191 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3192 return true; 3193} 3194 3195/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3196/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3197/// when they refer multiple MIOperands inside a single one. 3198bool ARMAsmParser:: 3199cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3200 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3201 // Create a writeback register dummy placeholder. 3202 Inst.addOperand(MCOperand::CreateImm(0)); 3203 // Rt 3204 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3205 // addr 3206 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3207 // offset 3208 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3209 // pred 3210 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3211 return true; 3212} 3213 3214/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3215/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3216/// when they refer multiple MIOperands inside a single one. 3217bool ARMAsmParser:: 3218cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3219 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3220 // Create a writeback register dummy placeholder. 3221 Inst.addOperand(MCOperand::CreateImm(0)); 3222 // Rt 3223 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3224 // addr 3225 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3226 // offset 3227 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3228 // pred 3229 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3230 return true; 3231} 3232 3233/// cvtLdrdPre - Convert parsed operands to MCInst. 3234/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3235/// when they refer multiple MIOperands inside a single one. 
3236bool ARMAsmParser:: 3237cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3238 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3239 // Rt, Rt2 3240 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3241 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3242 // Create a writeback register dummy placeholder. 3243 Inst.addOperand(MCOperand::CreateImm(0)); 3244 // addr 3245 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3246 // pred 3247 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3248 return true; 3249} 3250 3251/// cvtStrdPre - Convert parsed operands to MCInst. 3252/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3253/// when they refer multiple MIOperands inside a single one. 3254bool ARMAsmParser:: 3255cvtStrdPre(MCInst &Inst, unsigned Opcode, 3256 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3257 // Create a writeback register dummy placeholder. 3258 Inst.addOperand(MCOperand::CreateImm(0)); 3259 // Rt, Rt2 3260 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3261 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3262 // addr 3263 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3264 // pred 3265 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3266 return true; 3267} 3268 3269/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3270/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3271/// when they refer multiple MIOperands inside a single one. 3272bool ARMAsmParser:: 3273cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3274 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3275 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3276 // Create a writeback register dummy placeholder. 
3277 Inst.addOperand(MCOperand::CreateImm(0)); 3278 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3279 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3280 return true; 3281} 3282 3283/// cvtThumbMultiple- Convert parsed operands to MCInst. 3284/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3285/// when they refer multiple MIOperands inside a single one. 3286bool ARMAsmParser:: 3287cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3288 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3289 // The second source operand must be the same register as the destination 3290 // operand. 3291 if (Operands.size() == 6 && 3292 (((ARMOperand*)Operands[3])->getReg() != 3293 ((ARMOperand*)Operands[5])->getReg()) && 3294 (((ARMOperand*)Operands[3])->getReg() != 3295 ((ARMOperand*)Operands[4])->getReg())) { 3296 Error(Operands[3]->getStartLoc(), 3297 "destination register must match source register"); 3298 return false; 3299 } 3300 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3301 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3302 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3303 // If we have a three-operand form, use that, else the second source operand 3304 // is just the destination operand again. 3305 if (Operands.size() == 6) 3306 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3307 else 3308 Inst.addOperand(Inst.getOperand(0)); 3309 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3310 3311 return true; 3312} 3313 3314/// Parse an ARM memory expression, return false if successful else return true 3315/// or an error. The first token must be a '[' when called. 3316bool ARMAsmParser:: 3317parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3318 SMLoc S, E; 3319 assert(Parser.getTok().is(AsmToken::LBrac) && 3320 "Token is not a Left Bracket"); 3321 S = Parser.getTok().getLoc(); 3322 Parser.Lex(); // Eat left bracket token. 
3323 3324 const AsmToken &BaseRegTok = Parser.getTok(); 3325 int BaseRegNum = tryParseRegister(); 3326 if (BaseRegNum == -1) 3327 return Error(BaseRegTok.getLoc(), "register expected"); 3328 3329 // The next token must either be a comma or a closing bracket. 3330 const AsmToken &Tok = Parser.getTok(); 3331 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3332 return Error(Tok.getLoc(), "malformed memory operand"); 3333 3334 if (Tok.is(AsmToken::RBrac)) { 3335 E = Tok.getLoc(); 3336 Parser.Lex(); // Eat right bracket token. 3337 3338 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3339 0, 0, false, S, E)); 3340 3341 // If there's a pre-indexing writeback marker, '!', just add it as a token 3342 // operand. It's rather odd, but syntactically valid. 3343 if (Parser.getTok().is(AsmToken::Exclaim)) { 3344 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3345 Parser.Lex(); // Eat the '!'. 3346 } 3347 3348 return false; 3349 } 3350 3351 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3352 Parser.Lex(); // Eat the comma. 3353 3354 // If we have a ':', it's an alignment specifier. 3355 if (Parser.getTok().is(AsmToken::Colon)) { 3356 Parser.Lex(); // Eat the ':'. 3357 E = Parser.getTok().getLoc(); 3358 3359 const MCExpr *Expr; 3360 if (getParser().ParseExpression(Expr)) 3361 return true; 3362 3363 // The expression has to be a constant. Memory references with relocations 3364 // don't come through here, as they use the <label> forms of the relevant 3365 // instructions. 
3366 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3367 if (!CE) 3368 return Error (E, "constant expression expected"); 3369 3370 unsigned Align = 0; 3371 switch (CE->getValue()) { 3372 default: 3373 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3374 case 64: Align = 8; break; 3375 case 128: Align = 16; break; 3376 case 256: Align = 32; break; 3377 } 3378 3379 // Now we should have the closing ']' 3380 E = Parser.getTok().getLoc(); 3381 if (Parser.getTok().isNot(AsmToken::RBrac)) 3382 return Error(E, "']' expected"); 3383 Parser.Lex(); // Eat right bracket token. 3384 3385 // Don't worry about range checking the value here. That's handled by 3386 // the is*() predicates. 3387 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3388 ARM_AM::no_shift, 0, Align, 3389 false, S, E)); 3390 3391 // If there's a pre-indexing writeback marker, '!', just add it as a token 3392 // operand. 3393 if (Parser.getTok().is(AsmToken::Exclaim)) { 3394 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3395 Parser.Lex(); // Eat the '!'. 3396 } 3397 3398 return false; 3399 } 3400 3401 // If we have a '#', it's an immediate offset, else assume it's a register 3402 // offset. 3403 if (Parser.getTok().is(AsmToken::Hash)) { 3404 Parser.Lex(); // Eat the '#'. 3405 E = Parser.getTok().getLoc(); 3406 3407 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3408 const MCExpr *Offset; 3409 if (getParser().ParseExpression(Offset)) 3410 return true; 3411 3412 // The expression has to be a constant. Memory references with relocations 3413 // don't come through here, as they use the <label> forms of the relevant 3414 // instructions. 3415 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3416 if (!CE) 3417 return Error (E, "constant expression expected"); 3418 3419 // If the constant was #-0, represent it as INT32_MIN. 
3420 int32_t Val = CE->getValue(); 3421 if (isNegative && Val == 0) 3422 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3423 3424 // Now we should have the closing ']' 3425 E = Parser.getTok().getLoc(); 3426 if (Parser.getTok().isNot(AsmToken::RBrac)) 3427 return Error(E, "']' expected"); 3428 Parser.Lex(); // Eat right bracket token. 3429 3430 // Don't worry about range checking the value here. That's handled by 3431 // the is*() predicates. 3432 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3433 ARM_AM::no_shift, 0, 0, 3434 false, S, E)); 3435 3436 // If there's a pre-indexing writeback marker, '!', just add it as a token 3437 // operand. 3438 if (Parser.getTok().is(AsmToken::Exclaim)) { 3439 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3440 Parser.Lex(); // Eat the '!'. 3441 } 3442 3443 return false; 3444 } 3445 3446 // The register offset is optionally preceded by a '+' or '-' 3447 bool isNegative = false; 3448 if (Parser.getTok().is(AsmToken::Minus)) { 3449 isNegative = true; 3450 Parser.Lex(); // Eat the '-'. 3451 } else if (Parser.getTok().is(AsmToken::Plus)) { 3452 // Nothing to do. 3453 Parser.Lex(); // Eat the '+'. 3454 } 3455 3456 E = Parser.getTok().getLoc(); 3457 int OffsetRegNum = tryParseRegister(); 3458 if (OffsetRegNum == -1) 3459 return Error(E, "register expected"); 3460 3461 // If there's a shift operator, handle it. 3462 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3463 unsigned ShiftImm = 0; 3464 if (Parser.getTok().is(AsmToken::Comma)) { 3465 Parser.Lex(); // Eat the ','. 3466 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3467 return true; 3468 } 3469 3470 // Now we should have the closing ']' 3471 E = Parser.getTok().getLoc(); 3472 if (Parser.getTok().isNot(AsmToken::RBrac)) 3473 return Error(E, "']' expected"); 3474 Parser.Lex(); // Eat right bracket token. 
3475 3476 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3477 ShiftType, ShiftImm, 0, isNegative, 3478 S, E)); 3479 3480 // If there's a pre-indexing writeback marker, '!', just add it as a token 3481 // operand. 3482 if (Parser.getTok().is(AsmToken::Exclaim)) { 3483 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3484 Parser.Lex(); // Eat the '!'. 3485 } 3486 3487 return false; 3488} 3489 3490/// parseMemRegOffsetShift - one of these two: 3491/// ( lsl | lsr | asr | ror ) , # shift_amount 3492/// rrx 3493/// return true if it parses a shift otherwise it returns false. 3494bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3495 unsigned &Amount) { 3496 SMLoc Loc = Parser.getTok().getLoc(); 3497 const AsmToken &Tok = Parser.getTok(); 3498 if (Tok.isNot(AsmToken::Identifier)) 3499 return true; 3500 StringRef ShiftName = Tok.getString(); 3501 if (ShiftName == "lsl" || ShiftName == "LSL") 3502 St = ARM_AM::lsl; 3503 else if (ShiftName == "lsr" || ShiftName == "LSR") 3504 St = ARM_AM::lsr; 3505 else if (ShiftName == "asr" || ShiftName == "ASR") 3506 St = ARM_AM::asr; 3507 else if (ShiftName == "ror" || ShiftName == "ROR") 3508 St = ARM_AM::ror; 3509 else if (ShiftName == "rrx" || ShiftName == "RRX") 3510 St = ARM_AM::rrx; 3511 else 3512 return Error(Loc, "illegal shift operator"); 3513 Parser.Lex(); // Eat shift type token. 3514 3515 // rrx stands alone. 3516 Amount = 0; 3517 if (St != ARM_AM::rrx) { 3518 Loc = Parser.getTok().getLoc(); 3519 // A '#' and a shift amount. 3520 const AsmToken &HashTok = Parser.getTok(); 3521 if (HashTok.isNot(AsmToken::Hash)) 3522 return Error(HashTok.getLoc(), "'#' expected"); 3523 Parser.Lex(); // Eat hash token. 3524 3525 const MCExpr *Expr; 3526 if (getParser().ParseExpression(Expr)) 3527 return true; 3528 // Range check the immediate. 
    // Range check the immediate. Valid encodable shift amounts:
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
/// On success pushes an FPImm operand (the pre-encoded 8-bit VFP immediate)
/// onto Operands. Returns NoMatch without consuming tokens when the operand
/// cannot be an FP immediate for this instruction form.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  //
  // NOTE(review): Operands[2] is assumed to be the '.f32'/'.f64' type-suffix
  // token pushed by ParseInstruction — confirm this index holds for all
  // callers of the custom operand parsers.
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
      TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    // A literal FP value: parse as double, then check it is encodable as a
    // VFP3 8-bit immediate.
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    // An integer literal here is the raw pre-encoded 8-bit immediate, not a
    // value to encode.
    // NOTE(review): 'isNegative' is ignored on this path, so "#-N" with an
    // integer literal is accepted as "#N" — looks like a bug; confirm
    // intended behavior.
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic. Returns true on error (parse failure),
/// false on success, appending the parsed operand(s) to Operands.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // Try registers (with optional writeback '!') first, then shifted
    // register forms (e.g. "r0, lsl #2").
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    // If this is VMRS, check for the apsr_nzcv operand.
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
  }
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE) {
      Error(S, "constant expression expected");
      // NOTE(review): this returns an OperandMatchResultTy from a function
      // declared 'bool'; it implicitly converts to 'true' (failure) so
      // behavior is correct, but it should read 'return true;'.
      return MatchOperand_ParseFail;
    }
    int32_t Val = CE->getValue();
    // "#-0" must be distinguished from "#0": use INT32_MIN as a sentinel so
    // the negative-offset form can be selected later — presumably for
    // addressing modes; confirm against the operand consumers.
    if (isNegative && Val == 0)
      ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}

// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
// :lower16: and :upper16:.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms. Their last two
  // letters would otherwise be misread as a condition code (e.g. the "eq"
  // in "teq", the "ls" in "mls").
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code ("ie" = interrupt enable, "id" = disable).
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic
  // (e.g. "ittee"); split it off for the caller to encode.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::
getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                      bool &CanAcceptPredicationCode) {
  // Mnemonics with a flag-setting ('s' suffix) variant.
  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" ||
      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
      Mnemonic == "orr" || Mnemonic == "mvn" ||
      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
                      Mnemonic == "mla" || Mnemonic == "smlal" ||
                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
    CanAcceptCarrySet = true;
  } else
    CanAcceptCarrySet = false;

  // Mnemonics that never take a condition code.
  // NOTE(review): 'setend' is tested twice in this condition; the second
  // occurrence is redundant.
  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
      (Mnemonic == "clrex" && !isThumb()) ||
      (Mnemonic == "nop" && isThumbOne()) ||
      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
       !isThumb()) ||
      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
    CanAcceptPredicationCode = false;
  } else
    CanAcceptPredicationCode = true;

  // Additional Thumb-mode restrictions.
  if (isThumb()) {
    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
      CanAcceptPredicationCode = false;
  }
}

/// \brief Determine whether the defaulted cc_out operand (Operands[1]) should
/// be removed before matching, because the variant of the instruction being
/// parsed has no cc_out optional-def operand.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    unsigned Mask = 8;
    // Walk the mask suffix right-to-left, shifting in a '1' for each 't'.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                       ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic (e.g. width/type suffixes
  // such as ".w" or ".f32").
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // For now, we're only parsing Thumb1 (for the most part), so
    // just ignore ".n" qualifiers. We'll use them to restrict
    // matching when we do Thumb2.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// Return 'true' if the register list starting at operand OpNo contains a
// register that is neither a low GPR nor the permitted HiReg, 'false'
// otherwise. If Reg is in the register list, set 'containsReg' to true.
// NOTE(review): Inst is taken by value, copying the whole MCInst on each
// call; a 'const MCInst &' would avoid the copy.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern MCInstrDesc ARMInsts[];
}
// Look up the instruction description for an opcode directly from the
// tablegen'erated ARMInsts array.
static MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// FIXME: We would really like to be able to tablegen'erate this.
// Validate context-sensitive constraints (IT-block state, paired-register
// requirements, register-list restrictions) that the table-driven matcher
// cannot express. Returns true (after emitting a diagnostic) on error.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    // The first instruction in the block always uses the IT condition as-is;
    // later slots read their then/else bit from the mask.
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. (Operand 0 is the writeback register here, so the
    // pair starts at operand 1.)
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    // The base register of a writeback load must not also be loaded.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  case ARM::tPOP: {
    // Thumb1 POP allows low registers plus PC.
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    // Thumb1 PUSH allows low registers plus LR.
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

void ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
4384 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4385 Inst.getNumOperands() == 5) { 4386 MCInst TmpInst; 4387 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4388 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4389 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4390 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4391 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4392 TmpInst.addOperand(MCOperand::CreateImm(4)); 4393 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4394 TmpInst.addOperand(Inst.getOperand(3)); 4395 Inst = TmpInst; 4396 } 4397 break; 4398 case ARM::STMDB_UPD: 4399 // If this is a store of a single register via a 'push', then we should use 4400 // a pre-indexed STR instruction instead, per the ARM ARM. 4401 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4402 Inst.getNumOperands() == 5) { 4403 MCInst TmpInst; 4404 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4405 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4406 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4407 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4408 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4409 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4410 TmpInst.addOperand(Inst.getOperand(3)); 4411 Inst = TmpInst; 4412 } 4413 break; 4414 case ARM::tADDi8: 4415 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4416 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4417 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4418 // to encoding T1 if <Rd> is omitted." 4419 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4420 Inst.setOpcode(ARM::tADDi3); 4421 break; 4422 case ARM::tSUBi8: 4423 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4424 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4425 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4426 // to encoding T1 if <Rd> is omitted." 
4427 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4428 Inst.setOpcode(ARM::tSUBi3); 4429 break; 4430 case ARM::tB: 4431 // A Thumb conditional branch outside of an IT block is a tBcc. 4432 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4433 Inst.setOpcode(ARM::tBcc); 4434 break; 4435 case ARM::t2B: 4436 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4437 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4438 Inst.setOpcode(ARM::t2Bcc); 4439 break; 4440 case ARM::t2Bcc: 4441 // If the conditional is AL or we're in an IT block, we really want t2B. 4442 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) 4443 Inst.setOpcode(ARM::t2B); 4444 break; 4445 case ARM::tBcc: 4446 // If the conditional is AL, we really want tB. 4447 if (Inst.getOperand(1).getImm() == ARMCC::AL) 4448 Inst.setOpcode(ARM::tB); 4449 break; 4450 case ARM::tLDMIA: { 4451 // If the register list contains any high registers, or if the writeback 4452 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4453 // instead if we're in Thumb2. Otherwise, this should have generated 4454 // an error in validateInstruction(). 4455 unsigned Rn = Inst.getOperand(0).getReg(); 4456 bool hasWritebackToken = 4457 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4458 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4459 bool listContainsBase; 4460 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4461 (!listContainsBase && !hasWritebackToken) || 4462 (listContainsBase && hasWritebackToken)) { 4463 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4464 assert (isThumbTwo()); 4465 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4466 // If we're switching to the updating version, we need to insert 4467 // the writeback tied operand. 
4468 if (hasWritebackToken) 4469 Inst.insert(Inst.begin(), 4470 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4471 } 4472 break; 4473 } 4474 case ARM::tSTMIA_UPD: { 4475 // If the register list contains any high registers, we need to use 4476 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4477 // should have generated an error in validateInstruction(). 4478 unsigned Rn = Inst.getOperand(0).getReg(); 4479 bool listContainsBase; 4480 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4481 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4482 assert (isThumbTwo()); 4483 Inst.setOpcode(ARM::t2STMIA_UPD); 4484 } 4485 break; 4486 } 4487 case ARM::t2MOVi: { 4488 // If we can use the 16-bit encoding and the user didn't explicitly 4489 // request the 32-bit variant, transform it here. 4490 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4491 Inst.getOperand(1).getImm() <= 255 && 4492 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4493 Inst.getOperand(4).getReg() == ARM::CPSR) || 4494 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4495 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4496 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4497 // The operands aren't in the same order for tMOVi8... 4498 MCInst TmpInst; 4499 TmpInst.setOpcode(ARM::tMOVi8); 4500 TmpInst.addOperand(Inst.getOperand(0)); 4501 TmpInst.addOperand(Inst.getOperand(4)); 4502 TmpInst.addOperand(Inst.getOperand(1)); 4503 TmpInst.addOperand(Inst.getOperand(2)); 4504 TmpInst.addOperand(Inst.getOperand(3)); 4505 Inst = TmpInst; 4506 } 4507 break; 4508 } 4509 case ARM::t2MOVr: { 4510 // If we can use the 16-bit encoding and the user didn't explicitly 4511 // request the 32-bit variant, transform it here. 
4512 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4513 isARMLowRegister(Inst.getOperand(1).getReg()) && 4514 Inst.getOperand(2).getImm() == ARMCC::AL && 4515 Inst.getOperand(4).getReg() == ARM::CPSR && 4516 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4517 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4518 // The operands aren't the same for tMOV[S]r... (no cc_out) 4519 MCInst TmpInst; 4520 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 4521 TmpInst.addOperand(Inst.getOperand(0)); 4522 TmpInst.addOperand(Inst.getOperand(1)); 4523 TmpInst.addOperand(Inst.getOperand(2)); 4524 TmpInst.addOperand(Inst.getOperand(3)); 4525 Inst = TmpInst; 4526 } 4527 break; 4528 } 4529 case ARM::t2SXTH: 4530 case ARM::t2SXTB: 4531 case ARM::t2UXTH: 4532 case ARM::t2UXTB: { 4533 // If we can use the 16-bit encoding and the user didn't explicitly 4534 // request the 32-bit variant, transform it here. 4535 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4536 isARMLowRegister(Inst.getOperand(1).getReg()) && 4537 Inst.getOperand(2).getImm() == 0 && 4538 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4539 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4540 unsigned NewOpc; 4541 switch (Inst.getOpcode()) { 4542 default: llvm_unreachable("Illegal opcode!"); 4543 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4544 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4545 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4546 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4547 } 4548 // The operands aren't the same for thumb1 (no rotate operand). 
4549 MCInst TmpInst; 4550 TmpInst.setOpcode(NewOpc); 4551 TmpInst.addOperand(Inst.getOperand(0)); 4552 TmpInst.addOperand(Inst.getOperand(1)); 4553 TmpInst.addOperand(Inst.getOperand(3)); 4554 TmpInst.addOperand(Inst.getOperand(4)); 4555 Inst = TmpInst; 4556 } 4557 break; 4558 } 4559 case ARM::t2IT: { 4560 // The mask bits for all but the first condition are represented as 4561 // the low bit of the condition code value implies 't'. We currently 4562 // always have 1 implies 't', so XOR toggle the bits if the low bit 4563 // of the condition code is zero. The encoding also expects the low 4564 // bit of the condition to be encoded as bit 4 of the mask operand, 4565 // so mask that in if needed 4566 MCOperand &MO = Inst.getOperand(1); 4567 unsigned Mask = MO.getImm(); 4568 unsigned OrigMask = Mask; 4569 unsigned TZ = CountTrailingZeros_32(Mask); 4570 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4571 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4572 for (unsigned i = 3; i != TZ; --i) 4573 Mask ^= 1 << i; 4574 } else 4575 Mask |= 0x10; 4576 MO.setImm(Mask); 4577 4578 // Set up the IT block state according to the IT instruction we just 4579 // matched. 4580 assert(!inITBlock() && "nested IT blocks?!"); 4581 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4582 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4583 ITState.CurPosition = 0; 4584 ITState.FirstCond = true; 4585 break; 4586 } 4587 } 4588} 4589 4590unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4591 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4592 // suffix depending on whether they're in an IT block or not. 
4593 unsigned Opc = Inst.getOpcode(); 4594 MCInstrDesc &MCID = getInstDesc(Opc); 4595 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4596 assert(MCID.hasOptionalDef() && 4597 "optionally flag setting instruction missing optional def operand"); 4598 assert(MCID.NumOperands == Inst.getNumOperands() && 4599 "operand count mismatch!"); 4600 // Find the optional-def operand (cc_out). 4601 unsigned OpNo; 4602 for (OpNo = 0; 4603 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4604 ++OpNo) 4605 ; 4606 // If we're parsing Thumb1, reject it completely. 4607 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4608 return Match_MnemonicFail; 4609 // If we're parsing Thumb2, which form is legal depends on whether we're 4610 // in an IT block. 4611 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4612 !inITBlock()) 4613 return Match_RequiresITBlock; 4614 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4615 inITBlock()) 4616 return Match_RequiresNotITBlock; 4617 } 4618 // Some high-register supporting Thumb1 encodings only allow both registers 4619 // to be from r0-r7 when in Thumb2. 4620 else if (Opc == ARM::tADDhirr && isThumbOne() && 4621 isARMLowRegister(Inst.getOperand(1).getReg()) && 4622 isARMLowRegister(Inst.getOperand(2).getReg())) 4623 return Match_RequiresThumb2; 4624 // Others only require ARMv6 or later. 
4625 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4626 isARMLowRegister(Inst.getOperand(0).getReg()) && 4627 isARMLowRegister(Inst.getOperand(1).getReg())) 4628 return Match_RequiresV6; 4629 return Match_Success; 4630} 4631 4632bool ARMAsmParser:: 4633MatchAndEmitInstruction(SMLoc IDLoc, 4634 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4635 MCStreamer &Out) { 4636 MCInst Inst; 4637 unsigned ErrorInfo; 4638 unsigned MatchResult; 4639 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4640 switch (MatchResult) { 4641 default: break; 4642 case Match_Success: 4643 // Context sensitive operand constraints aren't handled by the matcher, 4644 // so check them here. 4645 if (validateInstruction(Inst, Operands)) { 4646 // Still progress the IT block, otherwise one wrong condition causes 4647 // nasty cascading errors. 4648 forwardITPosition(); 4649 return true; 4650 } 4651 4652 // Some instructions need post-processing to, for example, tweak which 4653 // encoding is selected. 4654 processInstruction(Inst, Operands); 4655 4656 // Only move forward at the very end so that everything in validate 4657 // and process gets a consistent answer about whether we're in an IT 4658 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // told us which one it was; otherwise fall back to the mnemonic.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
/// Returns true if the directive was not handled here (so the generic
/// parser diagnoses it), false if it was consumed.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Emit each comma-separated expression as a Size-byte value.
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  // Consume the end-of-statement token.
  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // TODO: set thumb mode
  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has the function name after the .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getString();
    Parser.Lex(); // Consume the identifier token.
4766 } 4767 4768 if (getLexer().isNot(AsmToken::EndOfStatement)) 4769 return Error(L, "unexpected token in directive"); 4770 Parser.Lex(); 4771 4772 // FIXME: assuming function name will be the line following .thumb_func 4773 if (!isMachO) { 4774 Name = Parser.getTok().getString(); 4775 } 4776 4777 // Mark symbol as a thumb symbol. 4778 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4779 getParser().getStreamer().EmitThumbFunc(Func); 4780 return false; 4781} 4782 4783/// parseDirectiveSyntax 4784/// ::= .syntax unified | divided 4785bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4786 const AsmToken &Tok = Parser.getTok(); 4787 if (Tok.isNot(AsmToken::Identifier)) 4788 return Error(L, "unexpected token in .syntax directive"); 4789 StringRef Mode = Tok.getString(); 4790 if (Mode == "unified" || Mode == "UNIFIED") 4791 Parser.Lex(); 4792 else if (Mode == "divided" || Mode == "DIVIDED") 4793 return Error(L, "'.syntax divided' arm asssembly not supported"); 4794 else 4795 return Error(L, "unrecognized syntax mode in .syntax directive"); 4796 4797 if (getLexer().isNot(AsmToken::EndOfStatement)) 4798 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4799 Parser.Lex(); 4800 4801 // TODO tell the MC streamer the mode 4802 // getParser().getStreamer().Emit???(); 4803 return false; 4804} 4805 4806/// parseDirectiveCode 4807/// ::= .code 16 | 32 4808bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4809 const AsmToken &Tok = Parser.getTok(); 4810 if (Tok.isNot(AsmToken::Integer)) 4811 return Error(L, "unexpected token in .code directive"); 4812 int64_t Val = Parser.getTok().getIntVal(); 4813 if (Val == 16) 4814 Parser.Lex(); 4815 else if (Val == 32) 4816 Parser.Lex(); 4817 else 4818 return Error(L, "invalid operand to .code directive"); 4819 4820 if (getLexer().isNot(AsmToken::EndOfStatement)) 4821 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4822 Parser.Lex(); 4823 4824 if (Val == 16) { 4825 if 
(!isThumb()) 4826 SwitchMode(); 4827 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4828 } else { 4829 if (isThumb()) 4830 SwitchMode(); 4831 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4832 } 4833 4834 return false; 4835} 4836 4837extern "C" void LLVMInitializeARMAsmLexer(); 4838 4839/// Force static initialization. 4840extern "C" void LLVMInitializeARMAsmParser() { 4841 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4842 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4843 LLVMInitializeARMAsmLexer(); 4844} 4845 4846#define GET_REGISTER_MATCHER 4847#define GET_MATCHER_IMPLEMENTATION 4848#include "ARMGenAsmMatcher.inc" 4849