// ARMAsmParser.cpp revision 71810ab7c0ecd6927dde1eee0c73169642f3764d
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringSwitch.h" 34#include "llvm/ADT/Twine.h" 35 36using namespace llvm; 37 38namespace { 39 40class ARMOperand; 41 42class ARMAsmParser : public MCTargetAsmParser { 43 MCSubtargetInfo &STI; 44 MCAsmParser &Parser; 45 46 struct { 47 ARMCC::CondCodes Cond; // Condition for IT block. 48 unsigned Mask:4; // Condition mask for instructions. 49 // Starting at first 1 (from lsb). 50 // '1' condition as indicated in IT. 51 // '0' inverse of condition (else). 52 // Count of instructions in IT block is 53 // 4 - trailingzeroes(mask) 54 55 bool FirstCond; // Explicit flag for when we're parsing the 56 // First instruction in the IT block. It's 57 // implied in the mask, so needs special 58 // handling. 
59 60 unsigned CurPosition; // Current position in parsing of IT 61 // block. In range [0,3]. Initialized 62 // according to count of instructions in block. 63 // ~0U if no active IT block. 64 } ITState; 65 bool inITBlock() { return ITState.CurPosition != ~0U;} 66 void forwardITPosition() { 67 if (!inITBlock()) return; 68 // Move to the next instruction in the IT block, if there is one. If not, 69 // mark the block as done. 70 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 71 if (++ITState.CurPosition == 5 - TZ) 72 ITState.CurPosition = ~0U; // Done with the IT block after this. 73 } 74 75 76 MCAsmParser &getParser() const { return Parser; } 77 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 78 79 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 80 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 81 82 int tryParseRegister(); 83 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 84 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 85 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 86 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 87 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 88 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 89 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 90 unsigned &ShiftAmount); 91 bool parseDirectiveWord(unsigned Size, SMLoc L); 92 bool parseDirectiveThumb(SMLoc L); 93 bool parseDirectiveThumbFunc(SMLoc L); 94 bool parseDirectiveCode(SMLoc L); 95 bool parseDirectiveSyntax(SMLoc L); 96 97 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 98 bool &CarrySetting, unsigned &ProcessorIMod, 99 StringRef &ITMask); 100 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 101 bool &CanAcceptPredicationCode); 102 103 bool isThumb() const { 104 // FIXME: Can tablegen auto-generate this? 
105 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 106 } 107 bool isThumbOne() const { 108 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 109 } 110 bool isThumbTwo() const { 111 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 112 } 113 bool hasV6Ops() const { 114 return STI.getFeatureBits() & ARM::HasV6Ops; 115 } 116 bool hasV7Ops() const { 117 return STI.getFeatureBits() & ARM::HasV7Ops; 118 } 119 void SwitchMode() { 120 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 121 setAvailableFeatures(FB); 122 } 123 bool isMClass() const { 124 return STI.getFeatureBits() & ARM::FeatureMClass; 125 } 126 127 /// @name Auto-generated Match Functions 128 /// { 129 130#define GET_ASSEMBLER_HEADER 131#include "ARMGenAsmMatcher.inc" 132 133 /// } 134 135 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 136 OperandMatchResultTy parseCoprocNumOperand( 137 SmallVectorImpl<MCParsedAsmOperand*>&); 138 OperandMatchResultTy parseCoprocRegOperand( 139 SmallVectorImpl<MCParsedAsmOperand*>&); 140 OperandMatchResultTy parseCoprocOptionOperand( 141 SmallVectorImpl<MCParsedAsmOperand*>&); 142 OperandMatchResultTy parseMemBarrierOptOperand( 143 SmallVectorImpl<MCParsedAsmOperand*>&); 144 OperandMatchResultTy parseProcIFlagsOperand( 145 SmallVectorImpl<MCParsedAsmOperand*>&); 146 OperandMatchResultTy parseMSRMaskOperand( 147 SmallVectorImpl<MCParsedAsmOperand*>&); 148 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 149 StringRef Op, int Low, int High); 150 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 151 return parsePKHImm(O, "lsl", 0, 31); 152 } 153 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 154 return parsePKHImm(O, "asr", 1, 32); 155 } 156 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 157 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 158 
OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 159 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 160 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 161 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 162 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 163 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 164 165 // Asm Match Converter Methods 166 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 167 const SmallVectorImpl<MCParsedAsmOperand*> &); 168 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 169 const SmallVectorImpl<MCParsedAsmOperand*> &); 170 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 171 const SmallVectorImpl<MCParsedAsmOperand*> &); 172 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 173 const SmallVectorImpl<MCParsedAsmOperand*> &); 174 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 175 const SmallVectorImpl<MCParsedAsmOperand*> &); 176 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 177 const SmallVectorImpl<MCParsedAsmOperand*> &); 178 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 179 const SmallVectorImpl<MCParsedAsmOperand*> &); 180 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 181 const SmallVectorImpl<MCParsedAsmOperand*> &); 182 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 183 const SmallVectorImpl<MCParsedAsmOperand*> &); 184 bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 185 const SmallVectorImpl<MCParsedAsmOperand*> &); 186 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 187 const SmallVectorImpl<MCParsedAsmOperand*> &); 188 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 189 const SmallVectorImpl<MCParsedAsmOperand*> &); 190 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 191 const 
SmallVectorImpl<MCParsedAsmOperand*> &); 192 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 193 const SmallVectorImpl<MCParsedAsmOperand*> &); 194 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 195 const SmallVectorImpl<MCParsedAsmOperand*> &); 196 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 197 const SmallVectorImpl<MCParsedAsmOperand*> &); 198 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 199 const SmallVectorImpl<MCParsedAsmOperand*> &); 200 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 201 const SmallVectorImpl<MCParsedAsmOperand*> &); 202 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 203 const SmallVectorImpl<MCParsedAsmOperand*> &); 204 bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 205 const SmallVectorImpl<MCParsedAsmOperand*> &); 206 bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 207 const SmallVectorImpl<MCParsedAsmOperand*> &); 208 209 bool validateInstruction(MCInst &Inst, 210 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 211 void processInstruction(MCInst &Inst, 212 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 213 bool shouldOmitCCOutOperand(StringRef Mnemonic, 214 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 215 216public: 217 enum ARMMatchResultTy { 218 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 219 Match_RequiresNotITBlock, 220 Match_RequiresV6, 221 Match_RequiresThumb2 222 }; 223 224 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 225 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 226 MCAsmParserExtension::Initialize(_Parser); 227 228 // Initialize the set of available features. 229 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 230 231 // Not in an ITBlock to start with. 
232 ITState.CurPosition = ~0U; 233 } 234 235 // Implementation of the MCTargetAsmParser interface: 236 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 237 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 238 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 239 bool ParseDirective(AsmToken DirectiveID); 240 241 unsigned checkTargetMatchPredicate(MCInst &Inst); 242 243 bool MatchAndEmitInstruction(SMLoc IDLoc, 244 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 245 MCStreamer &Out); 246}; 247} // end anonymous namespace 248 249namespace { 250 251/// ARMOperand - Instances of this class represent a parsed ARM machine 252/// instruction. 253class ARMOperand : public MCParsedAsmOperand { 254 enum KindTy { 255 k_CondCode, 256 k_CCOut, 257 k_ITCondMask, 258 k_CoprocNum, 259 k_CoprocReg, 260 k_CoprocOption, 261 k_Immediate, 262 k_FPImmediate, 263 k_MemBarrierOpt, 264 k_Memory, 265 k_PostIndexRegister, 266 k_MSRMask, 267 k_ProcIFlags, 268 k_VectorIndex, 269 k_Register, 270 k_RegisterList, 271 k_DPRRegisterList, 272 k_SPRRegisterList, 273 k_VectorList, 274 k_ShiftedRegister, 275 k_ShiftedImmediate, 276 k_ShifterImmediate, 277 k_RotateImmediate, 278 k_BitfieldDescriptor, 279 k_Token 280 } Kind; 281 282 SMLoc StartLoc, EndLoc; 283 SmallVector<unsigned, 8> Registers; 284 285 union { 286 struct { 287 ARMCC::CondCodes Val; 288 } CC; 289 290 struct { 291 unsigned Val; 292 } Cop; 293 294 struct { 295 unsigned Val; 296 } CoprocOption; 297 298 struct { 299 unsigned Mask:4; 300 } ITMask; 301 302 struct { 303 ARM_MB::MemBOpt Val; 304 } MBOpt; 305 306 struct { 307 ARM_PROC::IFlags Val; 308 } IFlags; 309 310 struct { 311 unsigned Val; 312 } MMask; 313 314 struct { 315 const char *Data; 316 unsigned Length; 317 } Tok; 318 319 struct { 320 unsigned RegNum; 321 } Reg; 322 323 // A vector register list is a sequential list of 1 to 4 registers. 
324 struct { 325 unsigned RegNum; 326 unsigned Count; 327 } VectorList; 328 329 struct { 330 unsigned Val; 331 } VectorIndex; 332 333 struct { 334 const MCExpr *Val; 335 } Imm; 336 337 struct { 338 unsigned Val; // encoded 8-bit representation 339 } FPImm; 340 341 /// Combined record for all forms of ARM address expressions. 342 struct { 343 unsigned BaseRegNum; 344 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 345 // was specified. 346 const MCConstantExpr *OffsetImm; // Offset immediate value 347 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 348 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 349 unsigned ShiftImm; // shift for OffsetReg. 350 unsigned Alignment; // 0 = no alignment specified 351 // n = alignment in bytes (8, 16, or 32) 352 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 353 } Memory; 354 355 struct { 356 unsigned RegNum; 357 bool isAdd; 358 ARM_AM::ShiftOpc ShiftTy; 359 unsigned ShiftImm; 360 } PostIdxReg; 361 362 struct { 363 bool isASR; 364 unsigned Imm; 365 } ShifterImm; 366 struct { 367 ARM_AM::ShiftOpc ShiftTy; 368 unsigned SrcReg; 369 unsigned ShiftReg; 370 unsigned ShiftImm; 371 } RegShiftedReg; 372 struct { 373 ARM_AM::ShiftOpc ShiftTy; 374 unsigned SrcReg; 375 unsigned ShiftImm; 376 } RegShiftedImm; 377 struct { 378 unsigned Imm; 379 } RotImm; 380 struct { 381 unsigned LSB; 382 unsigned Width; 383 } Bitfield; 384 }; 385 386 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 387public: 388 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 389 Kind = o.Kind; 390 StartLoc = o.StartLoc; 391 EndLoc = o.EndLoc; 392 switch (Kind) { 393 case k_CondCode: 394 CC = o.CC; 395 break; 396 case k_ITCondMask: 397 ITMask = o.ITMask; 398 break; 399 case k_Token: 400 Tok = o.Tok; 401 break; 402 case k_CCOut: 403 case k_Register: 404 Reg = o.Reg; 405 break; 406 case k_RegisterList: 407 case k_DPRRegisterList: 408 case k_SPRRegisterList: 409 Registers = o.Registers; 410 break; 411 
case k_VectorList: 412 VectorList = o.VectorList; 413 break; 414 case k_CoprocNum: 415 case k_CoprocReg: 416 Cop = o.Cop; 417 break; 418 case k_CoprocOption: 419 CoprocOption = o.CoprocOption; 420 break; 421 case k_Immediate: 422 Imm = o.Imm; 423 break; 424 case k_FPImmediate: 425 FPImm = o.FPImm; 426 break; 427 case k_MemBarrierOpt: 428 MBOpt = o.MBOpt; 429 break; 430 case k_Memory: 431 Memory = o.Memory; 432 break; 433 case k_PostIndexRegister: 434 PostIdxReg = o.PostIdxReg; 435 break; 436 case k_MSRMask: 437 MMask = o.MMask; 438 break; 439 case k_ProcIFlags: 440 IFlags = o.IFlags; 441 break; 442 case k_ShifterImmediate: 443 ShifterImm = o.ShifterImm; 444 break; 445 case k_ShiftedRegister: 446 RegShiftedReg = o.RegShiftedReg; 447 break; 448 case k_ShiftedImmediate: 449 RegShiftedImm = o.RegShiftedImm; 450 break; 451 case k_RotateImmediate: 452 RotImm = o.RotImm; 453 break; 454 case k_BitfieldDescriptor: 455 Bitfield = o.Bitfield; 456 break; 457 case k_VectorIndex: 458 VectorIndex = o.VectorIndex; 459 break; 460 } 461 } 462 463 /// getStartLoc - Get the location of the first token of this operand. 464 SMLoc getStartLoc() const { return StartLoc; } 465 /// getEndLoc - Get the location of the last token of this operand. 
466 SMLoc getEndLoc() const { return EndLoc; } 467 468 ARMCC::CondCodes getCondCode() const { 469 assert(Kind == k_CondCode && "Invalid access!"); 470 return CC.Val; 471 } 472 473 unsigned getCoproc() const { 474 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 475 return Cop.Val; 476 } 477 478 StringRef getToken() const { 479 assert(Kind == k_Token && "Invalid access!"); 480 return StringRef(Tok.Data, Tok.Length); 481 } 482 483 unsigned getReg() const { 484 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 485 return Reg.RegNum; 486 } 487 488 const SmallVectorImpl<unsigned> &getRegList() const { 489 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 490 Kind == k_SPRRegisterList) && "Invalid access!"); 491 return Registers; 492 } 493 494 const MCExpr *getImm() const { 495 assert(Kind == k_Immediate && "Invalid access!"); 496 return Imm.Val; 497 } 498 499 unsigned getFPImm() const { 500 assert(Kind == k_FPImmediate && "Invalid access!"); 501 return FPImm.Val; 502 } 503 504 unsigned getVectorIndex() const { 505 assert(Kind == k_VectorIndex && "Invalid access!"); 506 return VectorIndex.Val; 507 } 508 509 ARM_MB::MemBOpt getMemBarrierOpt() const { 510 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 511 return MBOpt.Val; 512 } 513 514 ARM_PROC::IFlags getProcIFlags() const { 515 assert(Kind == k_ProcIFlags && "Invalid access!"); 516 return IFlags.Val; 517 } 518 519 unsigned getMSRMask() const { 520 assert(Kind == k_MSRMask && "Invalid access!"); 521 return MMask.Val; 522 } 523 524 bool isCoprocNum() const { return Kind == k_CoprocNum; } 525 bool isCoprocReg() const { return Kind == k_CoprocReg; } 526 bool isCoprocOption() const { return Kind == k_CoprocOption; } 527 bool isCondCode() const { return Kind == k_CondCode; } 528 bool isCCOut() const { return Kind == k_CCOut; } 529 bool isITMask() const { return Kind == k_ITCondMask; } 530 bool isITCondCode() const { return Kind == k_CondCode; } 531 bool isImm() 
const { return Kind == k_Immediate; } 532 bool isFPImm() const { return Kind == k_FPImmediate; } 533 bool isImm8s4() const { 534 if (Kind != k_Immediate) 535 return false; 536 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 537 if (!CE) return false; 538 int64_t Value = CE->getValue(); 539 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 540 } 541 bool isImm0_1020s4() const { 542 if (Kind != k_Immediate) 543 return false; 544 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 545 if (!CE) return false; 546 int64_t Value = CE->getValue(); 547 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 548 } 549 bool isImm0_508s4() const { 550 if (Kind != k_Immediate) 551 return false; 552 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 553 if (!CE) return false; 554 int64_t Value = CE->getValue(); 555 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 556 } 557 bool isImm0_255() const { 558 if (Kind != k_Immediate) 559 return false; 560 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 561 if (!CE) return false; 562 int64_t Value = CE->getValue(); 563 return Value >= 0 && Value < 256; 564 } 565 bool isImm0_7() const { 566 if (Kind != k_Immediate) 567 return false; 568 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 569 if (!CE) return false; 570 int64_t Value = CE->getValue(); 571 return Value >= 0 && Value < 8; 572 } 573 bool isImm0_15() const { 574 if (Kind != k_Immediate) 575 return false; 576 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 577 if (!CE) return false; 578 int64_t Value = CE->getValue(); 579 return Value >= 0 && Value < 16; 580 } 581 bool isImm0_31() const { 582 if (Kind != k_Immediate) 583 return false; 584 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 585 if (!CE) return false; 586 int64_t Value = CE->getValue(); 587 return Value >= 0 && Value < 32; 588 } 589 bool isImm1_16() const { 590 if (Kind != k_Immediate) 591 return false; 
592 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 593 if (!CE) return false; 594 int64_t Value = CE->getValue(); 595 return Value > 0 && Value < 17; 596 } 597 bool isImm1_32() const { 598 if (Kind != k_Immediate) 599 return false; 600 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 601 if (!CE) return false; 602 int64_t Value = CE->getValue(); 603 return Value > 0 && Value < 33; 604 } 605 bool isImm0_65535() const { 606 if (Kind != k_Immediate) 607 return false; 608 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 609 if (!CE) return false; 610 int64_t Value = CE->getValue(); 611 return Value >= 0 && Value < 65536; 612 } 613 bool isImm0_65535Expr() const { 614 if (Kind != k_Immediate) 615 return false; 616 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 617 // If it's not a constant expression, it'll generate a fixup and be 618 // handled later. 619 if (!CE) return true; 620 int64_t Value = CE->getValue(); 621 return Value >= 0 && Value < 65536; 622 } 623 bool isImm24bit() const { 624 if (Kind != k_Immediate) 625 return false; 626 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 627 if (!CE) return false; 628 int64_t Value = CE->getValue(); 629 return Value >= 0 && Value <= 0xffffff; 630 } 631 bool isImmThumbSR() const { 632 if (Kind != k_Immediate) 633 return false; 634 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 635 if (!CE) return false; 636 int64_t Value = CE->getValue(); 637 return Value > 0 && Value < 33; 638 } 639 bool isPKHLSLImm() const { 640 if (Kind != k_Immediate) 641 return false; 642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 643 if (!CE) return false; 644 int64_t Value = CE->getValue(); 645 return Value >= 0 && Value < 32; 646 } 647 bool isPKHASRImm() const { 648 if (Kind != k_Immediate) 649 return false; 650 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 651 if (!CE) return false; 652 int64_t Value = CE->getValue(); 653 
return Value > 0 && Value <= 32; 654 } 655 bool isARMSOImm() const { 656 if (Kind != k_Immediate) 657 return false; 658 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 659 if (!CE) return false; 660 int64_t Value = CE->getValue(); 661 return ARM_AM::getSOImmVal(Value) != -1; 662 } 663 bool isARMSOImmNot() const { 664 if (Kind != k_Immediate) 665 return false; 666 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 667 if (!CE) return false; 668 int64_t Value = CE->getValue(); 669 return ARM_AM::getSOImmVal(~Value) != -1; 670 } 671 bool isT2SOImm() const { 672 if (Kind != k_Immediate) 673 return false; 674 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 675 if (!CE) return false; 676 int64_t Value = CE->getValue(); 677 return ARM_AM::getT2SOImmVal(Value) != -1; 678 } 679 bool isT2SOImmNot() const { 680 if (Kind != k_Immediate) 681 return false; 682 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 683 if (!CE) return false; 684 int64_t Value = CE->getValue(); 685 return ARM_AM::getT2SOImmVal(~Value) != -1; 686 } 687 bool isSetEndImm() const { 688 if (Kind != k_Immediate) 689 return false; 690 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 691 if (!CE) return false; 692 int64_t Value = CE->getValue(); 693 return Value == 1 || Value == 0; 694 } 695 bool isReg() const { return Kind == k_Register; } 696 bool isRegList() const { return Kind == k_RegisterList; } 697 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 698 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 699 bool isToken() const { return Kind == k_Token; } 700 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 701 bool isMemory() const { return Kind == k_Memory; } 702 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 703 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 704 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 705 bool isRotImm() const 
{ return Kind == k_RotateImmediate; } 706 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 707 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 708 bool isPostIdxReg() const { 709 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift; 710 } 711 bool isMemNoOffset(bool alignOK = false) const { 712 if (!isMemory()) 713 return false; 714 // No offset of any kind. 715 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 716 (alignOK || Memory.Alignment == 0); 717 } 718 bool isAlignedMemory() const { 719 return isMemNoOffset(true); 720 } 721 bool isAddrMode2() const { 722 if (!isMemory() || Memory.Alignment != 0) return false; 723 // Check for register offset. 724 if (Memory.OffsetRegNum) return true; 725 // Immediate offset in range [-4095, 4095]. 726 if (!Memory.OffsetImm) return true; 727 int64_t Val = Memory.OffsetImm->getValue(); 728 return Val > -4096 && Val < 4096; 729 } 730 bool isAM2OffsetImm() const { 731 if (Kind != k_Immediate) 732 return false; 733 // Immediate offset in range [-4095, 4095]. 734 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 735 if (!CE) return false; 736 int64_t Val = CE->getValue(); 737 return Val > -4096 && Val < 4096; 738 } 739 bool isAddrMode3() const { 740 if (!isMemory() || Memory.Alignment != 0) return false; 741 // No shifts are legal for AM3. 742 if (Memory.ShiftType != ARM_AM::no_shift) return false; 743 // Check for register offset. 744 if (Memory.OffsetRegNum) return true; 745 // Immediate offset in range [-255, 255]. 746 if (!Memory.OffsetImm) return true; 747 int64_t Val = Memory.OffsetImm->getValue(); 748 return Val > -256 && Val < 256; 749 } 750 bool isAM3Offset() const { 751 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 752 return false; 753 if (Kind == k_PostIndexRegister) 754 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 755 // Immediate offset in range [-255, 255]. 
756 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 757 if (!CE) return false; 758 int64_t Val = CE->getValue(); 759 // Special case, #-0 is INT32_MIN. 760 return (Val > -256 && Val < 256) || Val == INT32_MIN; 761 } 762 bool isAddrMode5() const { 763 // If we have an immediate that's not a constant, treat it as a label 764 // reference needing a fixup. If it is a constant, it's something else 765 // and we reject it. 766 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 767 return true; 768 if (!isMemory() || Memory.Alignment != 0) return false; 769 // Check for register offset. 770 if (Memory.OffsetRegNum) return false; 771 // Immediate offset in range [-1020, 1020] and a multiple of 4. 772 if (!Memory.OffsetImm) return true; 773 int64_t Val = Memory.OffsetImm->getValue(); 774 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 775 Val == INT32_MIN; 776 } 777 bool isMemTBB() const { 778 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 779 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 780 return false; 781 return true; 782 } 783 bool isMemTBH() const { 784 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 785 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 786 Memory.Alignment != 0 ) 787 return false; 788 return true; 789 } 790 bool isMemRegOffset() const { 791 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 792 return false; 793 return true; 794 } 795 bool isT2MemRegOffset() const { 796 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 797 Memory.Alignment != 0) 798 return false; 799 // Only lsl #{0, 1, 2, 3} allowed. 800 if (Memory.ShiftType == ARM_AM::no_shift) 801 return true; 802 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 803 return false; 804 return true; 805 } 806 bool isMemThumbRR() const { 807 // Thumb reg+reg addressing is simple. Just two registers, a base and 808 // an offset. 
No shifts, negations or any other complicating factors. 809 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 810 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 811 return false; 812 return isARMLowRegister(Memory.BaseRegNum) && 813 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 814 } 815 bool isMemThumbRIs4() const { 816 if (!isMemory() || Memory.OffsetRegNum != 0 || 817 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 818 return false; 819 // Immediate offset, multiple of 4 in range [0, 124]. 820 if (!Memory.OffsetImm) return true; 821 int64_t Val = Memory.OffsetImm->getValue(); 822 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 823 } 824 bool isMemThumbRIs2() const { 825 if (!isMemory() || Memory.OffsetRegNum != 0 || 826 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 827 return false; 828 // Immediate offset, multiple of 4 in range [0, 62]. 829 if (!Memory.OffsetImm) return true; 830 int64_t Val = Memory.OffsetImm->getValue(); 831 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 832 } 833 bool isMemThumbRIs1() const { 834 if (!isMemory() || Memory.OffsetRegNum != 0 || 835 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 836 return false; 837 // Immediate offset in range [0, 31]. 838 if (!Memory.OffsetImm) return true; 839 int64_t Val = Memory.OffsetImm->getValue(); 840 return Val >= 0 && Val <= 31; 841 } 842 bool isMemThumbSPI() const { 843 if (!isMemory() || Memory.OffsetRegNum != 0 || 844 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 845 return false; 846 // Immediate offset, multiple of 4 in range [0, 1020]. 847 if (!Memory.OffsetImm) return true; 848 int64_t Val = Memory.OffsetImm->getValue(); 849 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 850 } 851 bool isMemImm8s4Offset() const { 852 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 853 return false; 854 // Immediate offset a multiple of 4 in range [-1020, 1020]. 
855 if (!Memory.OffsetImm) return true; 856 int64_t Val = Memory.OffsetImm->getValue(); 857 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 858 } 859 bool isMemImm0_1020s4Offset() const { 860 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 861 return false; 862 // Immediate offset a multiple of 4 in range [0, 1020]. 863 if (!Memory.OffsetImm) return true; 864 int64_t Val = Memory.OffsetImm->getValue(); 865 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 866 } 867 bool isMemImm8Offset() const { 868 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 869 return false; 870 // Immediate offset in range [-255, 255]. 871 if (!Memory.OffsetImm) return true; 872 int64_t Val = Memory.OffsetImm->getValue(); 873 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 874 } 875 bool isMemPosImm8Offset() const { 876 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 877 return false; 878 // Immediate offset in range [0, 255]. 879 if (!Memory.OffsetImm) return true; 880 int64_t Val = Memory.OffsetImm->getValue(); 881 return Val >= 0 && Val < 256; 882 } 883 bool isMemNegImm8Offset() const { 884 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 885 return false; 886 // Immediate offset in range [-255, -1]. 887 if (!Memory.OffsetImm) return true; 888 int64_t Val = Memory.OffsetImm->getValue(); 889 return Val > -256 && Val < 0; 890 } 891 bool isMemUImm12Offset() const { 892 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 893 return false; 894 // Immediate offset in range [0, 4095]. 895 if (!Memory.OffsetImm) return true; 896 int64_t Val = Memory.OffsetImm->getValue(); 897 return (Val >= 0 && Val < 4096); 898 } 899 bool isMemImm12Offset() const { 900 // If we have an immediate that's not a constant, treat it as a label 901 // reference needing a fixup. If it is a constant, it's something else 902 // and we reject it. 
903 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 904 return true; 905 906 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 907 return false; 908 // Immediate offset in range [-4095, 4095]. 909 if (!Memory.OffsetImm) return true; 910 int64_t Val = Memory.OffsetImm->getValue(); 911 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 912 } 913 bool isPostIdxImm8() const { 914 if (Kind != k_Immediate) 915 return false; 916 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 917 if (!CE) return false; 918 int64_t Val = CE->getValue(); 919 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 920 } 921 bool isPostIdxImm8s4() const { 922 if (Kind != k_Immediate) 923 return false; 924 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 925 if (!CE) return false; 926 int64_t Val = CE->getValue(); 927 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 928 (Val == INT32_MIN); 929 } 930 931 bool isMSRMask() const { return Kind == k_MSRMask; } 932 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 933 934 // NEON operands. 935 bool isVecListOneD() const { 936 if (Kind != k_VectorList) return false; 937 return VectorList.Count == 1; 938 } 939 940 bool isVecListTwoD() const { 941 if (Kind != k_VectorList) return false; 942 return VectorList.Count == 2; 943 } 944 945 bool isVecListThreeD() const { 946 if (Kind != k_VectorList) return false; 947 return VectorList.Count == 3; 948 } 949 950 bool isVecListFourD() const { 951 if (Kind != k_VectorList) return false; 952 return VectorList.Count == 4; 953 } 954 955 bool isVecListTwoQ() const { 956 if (Kind != k_VectorList) return false; 957 //FIXME: We haven't taught the parser to handle by-two register lists 958 // yet, so don't pretend to know one. 
959 return VectorList.Count == 2 && false; 960 } 961 962 bool isVectorIndex8() const { 963 if (Kind != k_VectorIndex) return false; 964 return VectorIndex.Val < 8; 965 } 966 bool isVectorIndex16() const { 967 if (Kind != k_VectorIndex) return false; 968 return VectorIndex.Val < 4; 969 } 970 bool isVectorIndex32() const { 971 if (Kind != k_VectorIndex) return false; 972 return VectorIndex.Val < 2; 973 } 974 975 bool isNEONi8splat() const { 976 if (Kind != k_Immediate) 977 return false; 978 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 979 // Must be a constant. 980 if (!CE) return false; 981 int64_t Value = CE->getValue(); 982 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 983 // value. 984 return Value >= 0 && Value < 256; 985 } 986 987 bool isNEONi16splat() const { 988 if (Kind != k_Immediate) 989 return false; 990 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 991 // Must be a constant. 992 if (!CE) return false; 993 int64_t Value = CE->getValue(); 994 // i16 value in the range [0,255] or [0x0100, 0xff00] 995 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 996 } 997 998 bool isNEONi32splat() const { 999 if (Kind != k_Immediate) 1000 return false; 1001 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1002 // Must be a constant. 1003 if (!CE) return false; 1004 int64_t Value = CE->getValue(); 1005 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 1006 return (Value >= 0 && Value < 256) || 1007 (Value >= 0x0100 && Value <= 0xff00) || 1008 (Value >= 0x010000 && Value <= 0xff0000) || 1009 (Value >= 0x01000000 && Value <= 0xff000000); 1010 } 1011 1012 bool isNEONi32vmov() const { 1013 if (Kind != k_Immediate) 1014 return false; 1015 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1016 // Must be a constant. 
1017 if (!CE) return false; 1018 int64_t Value = CE->getValue(); 1019 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1020 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1021 return (Value >= 0 && Value < 256) || 1022 (Value >= 0x0100 && Value <= 0xff00) || 1023 (Value >= 0x010000 && Value <= 0xff0000) || 1024 (Value >= 0x01000000 && Value <= 0xff000000) || 1025 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1026 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1027 } 1028 1029 bool isNEONi64splat() const { 1030 if (Kind != k_Immediate) 1031 return false; 1032 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1033 // Must be a constant. 1034 if (!CE) return false; 1035 uint64_t Value = CE->getValue(); 1036 // i64 value with each byte being either 0 or 0xff. 1037 for (unsigned i = 0; i < 8; ++i) 1038 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1039 return true; 1040 } 1041 1042 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1043 // Add as immediates when possible. Null MCExpr = 0. 1044 if (Expr == 0) 1045 Inst.addOperand(MCOperand::CreateImm(0)); 1046 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1047 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1048 else 1049 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1050 } 1051 1052 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1053 assert(N == 2 && "Invalid number of operands!"); 1054 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1055 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 1056 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1057 } 1058 1059 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1060 assert(N == 1 && "Invalid number of operands!"); 1061 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1062 } 1063 1064 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1065 assert(N == 1 && "Invalid number of operands!"); 1066 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1067 } 1068 1069 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1070 assert(N == 1 && "Invalid number of operands!"); 1071 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1072 } 1073 1074 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1075 assert(N == 1 && "Invalid number of operands!"); 1076 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1077 } 1078 1079 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1080 assert(N == 1 && "Invalid number of operands!"); 1081 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1082 } 1083 1084 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1085 assert(N == 1 && "Invalid number of operands!"); 1086 Inst.addOperand(MCOperand::CreateReg(getReg())); 1087 } 1088 1089 void addRegOperands(MCInst &Inst, unsigned N) const { 1090 assert(N == 1 && "Invalid number of operands!"); 1091 Inst.addOperand(MCOperand::CreateReg(getReg())); 1092 } 1093 1094 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1095 assert(N == 3 && "Invalid number of operands!"); 1096 assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!"); 1097 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1098 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1099 Inst.addOperand(MCOperand::CreateImm( 1100 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1101 } 1102 1103 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1104 assert(N == 2 && "Invalid number of operands!"); 1105 
    // Tail of addRegShiftedImmOperands(): source reg plus the packed
    // shift opcode/amount.
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Bit 5 distinguishes ASR from LSL; the low bits carry the amount.
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  // Register lists expand to one register operand per member.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // (Shift all-ones right then left then right again so exactly bits
    // [lsb, lsb+width) survive, then invert.)
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    // Tail of addImm0_508s4Operands(): store the value pre-scaled by 4.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  // The addImm0_*Operands methods below store the immediate unchanged;
  // range validation happened in the matching is* predicate.
  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm24bitOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  void addAddrMode2Operands(MCInst &Inst,
                            unsigned N) const {
    // Addressing mode 2: base reg, offset reg (0 if none), and a packed
    // add/sub + offset/shift word built by ARM_AM::getAM2Opc.
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0 (represented by the INT32_MIN sentinel):
      // keep the 'sub' flag computed above but encode a zero offset.
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // Register form: reg plus the packed add/sub flag.
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ?
                  Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // TBB/TBH table-branch memory operands: base + index register only.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // Shift type/amount and the negation flag ride in a packed AM2 word.
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Thumb reg+imm forms: the stored immediate is pre-scaled by the
  // access size (4, 2, or 1 bytes).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is the #-0 sentinel: encode magnitude 0 with the sub flag.
    if (Imm == INT32_MIN) Imm = 0;
    // Bit 8 carries the add/sub flag; low bits carry the magnitude.
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ?
      ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOneDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListThreeDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListFourDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoQOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  // The NEON splat encoders below fold the value and a type selector into
  // one immediate. NOTE(review): the 0xX00 bits appear to select the NEON
  // modified-immediate "cmode" encoding -- confirm against the ARM ARM.
  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // High-byte form shifts the value down; low-byte form is used as-is.
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // Shift the set byte down to the low byte and tag which byte it was.
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // As for i32 splats, but the 00Xf/0Xff forms get their own tags.
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Tail of addNEONi64splatOperands(): collapse each byte to one bit.
    // Bit 0 of each byte distinguishes 0x00 from 0xff (the splat predicate
    // only admits those two byte values), giving an 8-bit pattern.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // --- Factory methods -------------------------------------------------
  // Each Create* allocates a new heap ARMOperand of the matching kind,
  // fills in the kind-specific union member, and records the source range.
  // Operands whose source text is a single token use S for both ends.
  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Token operands reference the lexer's buffer; they do not own Str.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // The list kind (core/DPR/SPR) is derived from the class of the first
  // register; the registers are stored sorted by register number.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // NOTE(review): Ctx is currently unused here and in CreateFPImm below.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

/// print - Dump a human-readable description of the operand to OS, for
/// debugging. Every operand kind is handled; the output format is not
/// intended to be parseable.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Table indexed directly by the 4-bit IT mask.
    // NOTE(review): only 15 entries for a mask asserted to fit in 4 bits —
    // a mask of 15 would index out of bounds. Presumably 15 never occurs
    // for a well-formed IT mask; confirm against the IT encoding.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset/shift/alignment are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): opens without '<' but closes with '>' — cosmetic
    // asymmetry in the debug output.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print the set a/i/f flags, highest bit first.
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm))
       << ", " << RegShiftedReg.ShiftReg << ", "
       << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm)
       << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm))
       << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm)
       << ">";
    break;
  case k_RotateImmediate:
    // RotImm stores the rotate amount divided by 8.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      // Comma-separate all but the last element.
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// ParseRegister - MCTargetAsmParser entry point. On success RegNo holds the
/// register and false is returned; true signals "not a register".
/// NOTE(review): StartLoc/EndLoc are never set here — callers relying on
/// them will see stale values; confirm whether that matters upstream.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Accept the architectural aliases the tablegen matcher doesn't know.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
/// tryParseShiftRegister - Parse a shift modifier ("lsl #2", "asr r3", "rrx")
/// that follows a register operand. The register already pushed onto
/// Operands is popped and folded into a single shifted-register or
/// shifted-immediate operand. Return 0 on success, 1 if this isn't a shifter
/// (no tokens consumed), -1 on a malformed shifter (tokens consumed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Shift mnemonics are matched case-insensitively.
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, i.e. 1 ("recoverable"), yet the
    // previous operand has already been popped and will be deleted by the
    // OwningPtr — the operand list is no longer intact for a retry. Confirm
    // callers treat this as fatal.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      // NOTE(review): the location is taken after tryParseRegister() may
      // have consumed the token, so on failure it points past the bad token.
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // RRX is always emitted as a shifted-immediate (with Imm == 0), even
  // though ShiftReg was set above.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2179bool ARMAsmParser:: 2180tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2181 SMLoc S = Parser.getTok().getLoc(); 2182 int RegNo = tryParseRegister(); 2183 if (RegNo == -1) 2184 return true; 2185 2186 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2187 2188 const AsmToken &ExclaimTok = Parser.getTok(); 2189 if (ExclaimTok.is(AsmToken::Exclaim)) { 2190 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2191 ExclaimTok.getLoc())); 2192 Parser.Lex(); // Eat exclaim token 2193 return false; 2194 } 2195 2196 // Also check for an index operand. This is only legal for vector registers, 2197 // but that'll get caught OK in operand matching, so we don't need to 2198 // explicitly filter everything else out here. 2199 if (Parser.getTok().is(AsmToken::LBrac)) { 2200 SMLoc SIdx = Parser.getTok().getLoc(); 2201 Parser.Lex(); // Eat left bracket token. 2202 2203 const MCExpr *ImmVal; 2204 if (getParser().ParseExpression(ImmVal)) 2205 return MatchOperand_ParseFail; 2206 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2207 if (!MCE) { 2208 TokError("immediate value expected for vector index"); 2209 return MatchOperand_ParseFail; 2210 } 2211 2212 SMLoc E = Parser.getTok().getLoc(); 2213 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2214 Error(E, "']' expected"); 2215 return MatchOperand_ParseFail; 2216 } 2217 2218 Parser.Lex(); // Eat right bracket token. 2219 2220 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2221 SIdx, E, 2222 getContext())); 2223 } 2224 2225 return false; 2226} 2227 2228/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2229/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2230/// "c5", ... 2231static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2232 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2233 // but efficient. 
2234 switch (Name.size()) { 2235 default: break; 2236 case 2: 2237 if (Name[0] != CoprocOp) 2238 return -1; 2239 switch (Name[1]) { 2240 default: return -1; 2241 case '0': return 0; 2242 case '1': return 1; 2243 case '2': return 2; 2244 case '3': return 3; 2245 case '4': return 4; 2246 case '5': return 5; 2247 case '6': return 6; 2248 case '7': return 7; 2249 case '8': return 8; 2250 case '9': return 9; 2251 } 2252 break; 2253 case 3: 2254 if (Name[0] != CoprocOp || Name[1] != '1') 2255 return -1; 2256 switch (Name[2]) { 2257 default: return -1; 2258 case '0': return 10; 2259 case '1': return 11; 2260 case '2': return 12; 2261 case '3': return 13; 2262 case '4': return 14; 2263 case '5': return 15; 2264 } 2265 break; 2266 } 2267 2268 return -1; 2269} 2270 2271/// parseITCondCode - Try to parse a condition code for an IT instruction. 2272ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2273parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2274 SMLoc S = Parser.getTok().getLoc(); 2275 const AsmToken &Tok = Parser.getTok(); 2276 if (!Tok.is(AsmToken::Identifier)) 2277 return MatchOperand_NoMatch; 2278 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2279 .Case("eq", ARMCC::EQ) 2280 .Case("ne", ARMCC::NE) 2281 .Case("hs", ARMCC::HS) 2282 .Case("cs", ARMCC::HS) 2283 .Case("lo", ARMCC::LO) 2284 .Case("cc", ARMCC::LO) 2285 .Case("mi", ARMCC::MI) 2286 .Case("pl", ARMCC::PL) 2287 .Case("vs", ARMCC::VS) 2288 .Case("vc", ARMCC::VC) 2289 .Case("hi", ARMCC::HI) 2290 .Case("ls", ARMCC::LS) 2291 .Case("ge", ARMCC::GE) 2292 .Case("lt", ARMCC::LT) 2293 .Case("gt", ARMCC::GT) 2294 .Case("le", ARMCC::LE) 2295 .Case("al", ARMCC::AL) 2296 .Default(~0U); 2297 if (CC == ~0U) 2298 return MatchOperand_NoMatch; 2299 Parser.Lex(); // Eat the token. 2300 2301 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2302 2303 return MatchOperand_Success; 2304} 2305 2306/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2307/// token must be an Identifier when called, and if it is a coprocessor 2308/// number, the token is eaten and the operand is added to the operand list. 2309ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2310parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2311 SMLoc S = Parser.getTok().getLoc(); 2312 const AsmToken &Tok = Parser.getTok(); 2313 if (Tok.isNot(AsmToken::Identifier)) 2314 return MatchOperand_NoMatch; 2315 2316 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2317 if (Num == -1) 2318 return MatchOperand_NoMatch; 2319 2320 Parser.Lex(); // Eat identifier token. 2321 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2322 return MatchOperand_Success; 2323} 2324 2325/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2326/// token must be an Identifier when called, and if it is a coprocessor 2327/// number, the token is eaten and the operand is added to the operand list. 2328ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2329parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2330 SMLoc S = Parser.getTok().getLoc(); 2331 const AsmToken &Tok = Parser.getTok(); 2332 if (Tok.isNot(AsmToken::Identifier)) 2333 return MatchOperand_NoMatch; 2334 2335 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2336 if (Reg == -1) 2337 return MatchOperand_NoMatch; 2338 2339 Parser.Lex(); // Eat identifier token. 2340 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2341 return MatchOperand_Success; 2342} 2343 2344/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2345/// coproc_option : '{' imm0_255 '}' 2346ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2347parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2348 SMLoc S = Parser.getTok().getLoc(); 2349 2350 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2351 if (Parser.getTok().isNot(AsmToken::LCurly)) 2352 return MatchOperand_NoMatch; 2353 Parser.Lex(); // Eat the '{' 2354 2355 const MCExpr *Expr; 2356 SMLoc Loc = Parser.getTok().getLoc(); 2357 if (getParser().ParseExpression(Expr)) { 2358 Error(Loc, "illegal expression"); 2359 return MatchOperand_ParseFail; 2360 } 2361 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2362 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2363 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2364 return MatchOperand_ParseFail; 2365 } 2366 int Val = CE->getValue(); 2367 2368 // Check for and consume the closing '}' 2369 if (Parser.getTok().isNot(AsmToken::RCurly)) 2370 return MatchOperand_ParseFail; 2371 SMLoc E = Parser.getTok().getLoc(); 2372 Parser.Lex(); // Eat the '}' 2373 2374 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2375 return MatchOperand_Success; 2376} 2377 2378// For register list parsing, we need to map from raw GPR register numbering 2379// to the enumeration values. The enumeration values aren't sorted by 2380// register number due to our using "sp", "lr" and "pc" as canonical names. 2381static unsigned getNextRegister(unsigned Reg) { 2382 // If this is a GPR, we need to do it manually, otherwise we can rely 2383 // on the sort ordering of the enumeration since the other reg-classes 2384 // are sane. 
2385 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2386 return Reg + 1; 2387 switch(Reg) { 2388 default: assert(0 && "Invalid GPR number!"); 2389 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2390 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2391 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2392 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2393 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2394 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2395 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2396 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2397 } 2398} 2399 2400/// Parse a register list. 2401bool ARMAsmParser:: 2402parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2403 assert(Parser.getTok().is(AsmToken::LCurly) && 2404 "Token is not a Left Curly Brace"); 2405 SMLoc S = Parser.getTok().getLoc(); 2406 Parser.Lex(); // Eat '{' token. 2407 SMLoc RegLoc = Parser.getTok().getLoc(); 2408 2409 // Check the first register in the list to see what register class 2410 // this is a list of. 2411 int Reg = tryParseRegister(); 2412 if (Reg == -1) 2413 return Error(RegLoc, "register expected"); 2414 2415 const MCRegisterClass *RC; 2416 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2417 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2418 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2419 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2420 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2421 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2422 else 2423 return Error(RegLoc, "invalid register in register list"); 2424 2425 // The reglist instructions have at most 16 registers, so reserve 2426 // space for that many. 2427 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2428 // Store the first register. 
2429 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2430 2431 // This starts immediately after the first register token in the list, 2432 // so we can see either a comma or a minus (range separator) as a legal 2433 // next token. 2434 while (Parser.getTok().is(AsmToken::Comma) || 2435 Parser.getTok().is(AsmToken::Minus)) { 2436 if (Parser.getTok().is(AsmToken::Minus)) { 2437 Parser.Lex(); // Eat the comma. 2438 SMLoc EndLoc = Parser.getTok().getLoc(); 2439 int EndReg = tryParseRegister(); 2440 if (EndReg == -1) 2441 return Error(EndLoc, "register expected"); 2442 // If the register is the same as the start reg, there's nothing 2443 // more to do. 2444 if (Reg == EndReg) 2445 continue; 2446 // The register must be in the same register class as the first. 2447 if (!RC->contains(EndReg)) 2448 return Error(EndLoc, "invalid register in register list"); 2449 // Ranges must go from low to high. 2450 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2451 return Error(EndLoc, "bad range in register list"); 2452 2453 // Add all the registers in the range to the register list. 2454 while (Reg != EndReg) { 2455 Reg = getNextRegister(Reg); 2456 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2457 } 2458 continue; 2459 } 2460 Parser.Lex(); // Eat the comma. 2461 RegLoc = Parser.getTok().getLoc(); 2462 int OldReg = Reg; 2463 Reg = tryParseRegister(); 2464 if (Reg == -1) 2465 return Error(RegLoc, "register expected"); 2466 // The register must be in the same register class as the first. 2467 if (!RC->contains(Reg)) 2468 return Error(RegLoc, "invalid register in register list"); 2469 // List must be monotonically increasing. 2470 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2471 return Error(RegLoc, "register list not in ascending order"); 2472 // VFP register lists must also be contiguous. 
2473 // It's OK to use the enumeration values directly here rather, as the 2474 // VFP register classes have the enum sorted properly. 2475 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2476 Reg != OldReg + 1) 2477 return Error(RegLoc, "non-contiguous register range"); 2478 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2479 } 2480 2481 SMLoc E = Parser.getTok().getLoc(); 2482 if (Parser.getTok().isNot(AsmToken::RCurly)) 2483 return Error(E, "'}' expected"); 2484 Parser.Lex(); // Eat '}' token. 2485 2486 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2487 return false; 2488} 2489 2490// Return the low-subreg of a given Q register. 2491static unsigned getDRegFromQReg(unsigned QReg) { 2492 switch (QReg) { 2493 default: llvm_unreachable("expected a Q register!"); 2494 case ARM::Q0: return ARM::D0; 2495 case ARM::Q1: return ARM::D2; 2496 case ARM::Q2: return ARM::D4; 2497 case ARM::Q3: return ARM::D6; 2498 case ARM::Q4: return ARM::D8; 2499 case ARM::Q5: return ARM::D10; 2500 case ARM::Q6: return ARM::D12; 2501 case ARM::Q7: return ARM::D14; 2502 case ARM::Q8: return ARM::D16; 2503 case ARM::Q9: return ARM::D19; 2504 case ARM::Q10: return ARM::D20; 2505 case ARM::Q11: return ARM::D22; 2506 case ARM::Q12: return ARM::D24; 2507 case ARM::Q13: return ARM::D26; 2508 case ARM::Q14: return ARM::D28; 2509 case ARM::Q15: return ARM::D30; 2510 } 2511} 2512 2513// parse a vector register list 2514ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2515parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2516 if(Parser.getTok().isNot(AsmToken::LCurly)) 2517 return MatchOperand_NoMatch; 2518 2519 SMLoc S = Parser.getTok().getLoc(); 2520 Parser.Lex(); // Eat '{' token. 
2521 SMLoc RegLoc = Parser.getTok().getLoc(); 2522 2523 int Reg = tryParseRegister(); 2524 if (Reg == -1) { 2525 Error(RegLoc, "register expected"); 2526 return MatchOperand_ParseFail; 2527 } 2528 unsigned Count = 1; 2529 unsigned FirstReg = Reg; 2530 // The list is of D registers, but we also allow Q regs and just interpret 2531 // them as the two D sub-registers. 2532 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2533 FirstReg = Reg = getDRegFromQReg(Reg); 2534 ++Reg; 2535 ++Count; 2536 } 2537 2538 while (Parser.getTok().is(AsmToken::Comma)) { 2539 Parser.Lex(); // Eat the comma. 2540 RegLoc = Parser.getTok().getLoc(); 2541 int OldReg = Reg; 2542 Reg = tryParseRegister(); 2543 if (Reg == -1) { 2544 Error(RegLoc, "register expected"); 2545 return MatchOperand_ParseFail; 2546 } 2547 // vector register lists must be contiguous. 2548 // It's OK to use the enumeration values directly here rather, as the 2549 // VFP register classes have the enum sorted properly. 2550 // 2551 // The list is of D registers, but we also allow Q regs and just interpret 2552 // them as the two D sub-registers. 2553 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2554 Reg = getDRegFromQReg(Reg); 2555 if (Reg != OldReg + 1) { 2556 Error(RegLoc, "non-contiguous register range"); 2557 return MatchOperand_ParseFail; 2558 } 2559 ++Reg; 2560 Count += 2; 2561 continue; 2562 } 2563 // Normal D register. Just check that it's contiguous and keep going. 2564 if (Reg != OldReg + 1) { 2565 Error(RegLoc, "non-contiguous register range"); 2566 return MatchOperand_ParseFail; 2567 } 2568 ++Count; 2569 } 2570 2571 SMLoc E = Parser.getTok().getLoc(); 2572 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2573 Error(E, "'}' expected"); 2574 return MatchOperand_ParseFail; 2575 } 2576 Parser.Lex(); // Eat '}' token. 
2577 2578 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2579 return MatchOperand_Success; 2580} 2581 2582/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2583ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2584parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2585 SMLoc S = Parser.getTok().getLoc(); 2586 const AsmToken &Tok = Parser.getTok(); 2587 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2588 StringRef OptStr = Tok.getString(); 2589 2590 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2591 .Case("sy", ARM_MB::SY) 2592 .Case("st", ARM_MB::ST) 2593 .Case("sh", ARM_MB::ISH) 2594 .Case("ish", ARM_MB::ISH) 2595 .Case("shst", ARM_MB::ISHST) 2596 .Case("ishst", ARM_MB::ISHST) 2597 .Case("nsh", ARM_MB::NSH) 2598 .Case("un", ARM_MB::NSH) 2599 .Case("nshst", ARM_MB::NSHST) 2600 .Case("unst", ARM_MB::NSHST) 2601 .Case("osh", ARM_MB::OSH) 2602 .Case("oshst", ARM_MB::OSHST) 2603 .Default(~0U); 2604 2605 if (Opt == ~0U) 2606 return MatchOperand_NoMatch; 2607 2608 Parser.Lex(); // Eat identifier token. 2609 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2610 return MatchOperand_Success; 2611} 2612 2613/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2614ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2615parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2616 SMLoc S = Parser.getTok().getLoc(); 2617 const AsmToken &Tok = Parser.getTok(); 2618 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2619 StringRef IFlagsStr = Tok.getString(); 2620 2621 // An iflags string of "none" is interpreted to mean that none of the AIF 2622 // bits are set. Not a terribly useful instruction, but a valid encoding. 
2623 unsigned IFlags = 0; 2624 if (IFlagsStr != "none") { 2625 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2626 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2627 .Case("a", ARM_PROC::A) 2628 .Case("i", ARM_PROC::I) 2629 .Case("f", ARM_PROC::F) 2630 .Default(~0U); 2631 2632 // If some specific iflag is already set, it means that some letter is 2633 // present more than once, this is not acceptable. 2634 if (Flag == ~0U || (IFlags & Flag)) 2635 return MatchOperand_NoMatch; 2636 2637 IFlags |= Flag; 2638 } 2639 } 2640 2641 Parser.Lex(); // Eat identifier token. 2642 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2643 return MatchOperand_Success; 2644} 2645 2646/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 2647ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2648parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2649 SMLoc S = Parser.getTok().getLoc(); 2650 const AsmToken &Tok = Parser.getTok(); 2651 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2652 StringRef Mask = Tok.getString(); 2653 2654 if (isMClass()) { 2655 // See ARMv6-M 10.1.1 2656 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2657 .Case("apsr", 0) 2658 .Case("iapsr", 1) 2659 .Case("eapsr", 2) 2660 .Case("xpsr", 3) 2661 .Case("ipsr", 5) 2662 .Case("epsr", 6) 2663 .Case("iepsr", 7) 2664 .Case("msp", 8) 2665 .Case("psp", 9) 2666 .Case("primask", 16) 2667 .Case("basepri", 17) 2668 .Case("basepri_max", 18) 2669 .Case("faultmask", 19) 2670 .Case("control", 20) 2671 .Default(~0U); 2672 2673 if (FlagsVal == ~0U) 2674 return MatchOperand_NoMatch; 2675 2676 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2677 // basepri, basepri_max and faultmask only valid for V7m. 2678 return MatchOperand_NoMatch; 2679 2680 Parser.Lex(); // Eat identifier token. 
2681 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2682 return MatchOperand_Success; 2683 } 2684 2685 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2686 size_t Start = 0, Next = Mask.find('_'); 2687 StringRef Flags = ""; 2688 std::string SpecReg = Mask.slice(Start, Next).lower(); 2689 if (Next != StringRef::npos) 2690 Flags = Mask.slice(Next+1, Mask.size()); 2691 2692 // FlagsVal contains the complete mask: 2693 // 3-0: Mask 2694 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2695 unsigned FlagsVal = 0; 2696 2697 if (SpecReg == "apsr") { 2698 FlagsVal = StringSwitch<unsigned>(Flags) 2699 .Case("nzcvq", 0x8) // same as CPSR_f 2700 .Case("g", 0x4) // same as CPSR_s 2701 .Case("nzcvqg", 0xc) // same as CPSR_fs 2702 .Default(~0U); 2703 2704 if (FlagsVal == ~0U) { 2705 if (!Flags.empty()) 2706 return MatchOperand_NoMatch; 2707 else 2708 FlagsVal = 8; // No flag 2709 } 2710 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2711 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2712 Flags = "fc"; 2713 for (int i = 0, e = Flags.size(); i != e; ++i) { 2714 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2715 .Case("c", 1) 2716 .Case("x", 2) 2717 .Case("s", 4) 2718 .Case("f", 8) 2719 .Default(~0U); 2720 2721 // If some specific flag is already set, it means that some letter is 2722 // present more than once, this is not acceptable. 2723 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2724 return MatchOperand_NoMatch; 2725 FlagsVal |= Flag; 2726 } 2727 } else // No match for special register. 2728 return MatchOperand_NoMatch; 2729 2730 // Special register without flags is NOT equivalent to "fc" flags. 2731 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2732 // two lines would enable gas compatibility at the expense of breaking 2733 // round-tripping. 
2734 // 2735 // if (!FlagsVal) 2736 // FlagsVal = 0x9; 2737 2738 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2739 if (SpecReg == "spsr") 2740 FlagsVal |= 16; 2741 2742 Parser.Lex(); // Eat identifier token. 2743 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2744 return MatchOperand_Success; 2745} 2746 2747ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2748parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2749 int Low, int High) { 2750 const AsmToken &Tok = Parser.getTok(); 2751 if (Tok.isNot(AsmToken::Identifier)) { 2752 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2753 return MatchOperand_ParseFail; 2754 } 2755 StringRef ShiftName = Tok.getString(); 2756 std::string LowerOp = Op.lower(); 2757 std::string UpperOp = Op.upper(); 2758 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2759 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2760 return MatchOperand_ParseFail; 2761 } 2762 Parser.Lex(); // Eat shift type token. 2763 2764 // There must be a '#' and a shift amount. 2765 if (Parser.getTok().isNot(AsmToken::Hash)) { 2766 Error(Parser.getTok().getLoc(), "'#' expected"); 2767 return MatchOperand_ParseFail; 2768 } 2769 Parser.Lex(); // Eat hash token. 
2770 2771 const MCExpr *ShiftAmount; 2772 SMLoc Loc = Parser.getTok().getLoc(); 2773 if (getParser().ParseExpression(ShiftAmount)) { 2774 Error(Loc, "illegal expression"); 2775 return MatchOperand_ParseFail; 2776 } 2777 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2778 if (!CE) { 2779 Error(Loc, "constant expression expected"); 2780 return MatchOperand_ParseFail; 2781 } 2782 int Val = CE->getValue(); 2783 if (Val < Low || Val > High) { 2784 Error(Loc, "immediate value out of range"); 2785 return MatchOperand_ParseFail; 2786 } 2787 2788 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2789 2790 return MatchOperand_Success; 2791} 2792 2793ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2794parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2795 const AsmToken &Tok = Parser.getTok(); 2796 SMLoc S = Tok.getLoc(); 2797 if (Tok.isNot(AsmToken::Identifier)) { 2798 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2799 return MatchOperand_ParseFail; 2800 } 2801 int Val = StringSwitch<int>(Tok.getString()) 2802 .Case("be", 1) 2803 .Case("le", 0) 2804 .Default(-1); 2805 Parser.Lex(); // Eat the token. 2806 2807 if (Val == -1) { 2808 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2809 return MatchOperand_ParseFail; 2810 } 2811 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2812 getContext()), 2813 S, Parser.getTok().getLoc())); 2814 return MatchOperand_Success; 2815} 2816 2817/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2818/// instructions. Legal values are: 2819/// lsl #n 'n' in [0,31] 2820/// asr #n 'n' in [1,32] 2821/// n == 32 encoded as n == 0. 
2822ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2823parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2824 const AsmToken &Tok = Parser.getTok(); 2825 SMLoc S = Tok.getLoc(); 2826 if (Tok.isNot(AsmToken::Identifier)) { 2827 Error(S, "shift operator 'asr' or 'lsl' expected"); 2828 return MatchOperand_ParseFail; 2829 } 2830 StringRef ShiftName = Tok.getString(); 2831 bool isASR; 2832 if (ShiftName == "lsl" || ShiftName == "LSL") 2833 isASR = false; 2834 else if (ShiftName == "asr" || ShiftName == "ASR") 2835 isASR = true; 2836 else { 2837 Error(S, "shift operator 'asr' or 'lsl' expected"); 2838 return MatchOperand_ParseFail; 2839 } 2840 Parser.Lex(); // Eat the operator. 2841 2842 // A '#' and a shift amount. 2843 if (Parser.getTok().isNot(AsmToken::Hash)) { 2844 Error(Parser.getTok().getLoc(), "'#' expected"); 2845 return MatchOperand_ParseFail; 2846 } 2847 Parser.Lex(); // Eat hash token. 2848 2849 const MCExpr *ShiftAmount; 2850 SMLoc E = Parser.getTok().getLoc(); 2851 if (getParser().ParseExpression(ShiftAmount)) { 2852 Error(E, "malformed shift expression"); 2853 return MatchOperand_ParseFail; 2854 } 2855 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2856 if (!CE) { 2857 Error(E, "shift amount must be an immediate"); 2858 return MatchOperand_ParseFail; 2859 } 2860 2861 int64_t Val = CE->getValue(); 2862 if (isASR) { 2863 // Shift amount must be in [1,32] 2864 if (Val < 1 || Val > 32) { 2865 Error(E, "'asr' shift amount must be in range [1,32]"); 2866 return MatchOperand_ParseFail; 2867 } 2868 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2869 if (isThumb() && Val == 32) { 2870 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2871 return MatchOperand_ParseFail; 2872 } 2873 if (Val == 32) Val = 0; 2874 } else { 2875 // Shift amount must be in [1,32] 2876 if (Val < 0 || Val > 31) { 2877 Error(E, "'lsr' shift amount must be in range [0,31]"); 2878 return MatchOperand_ParseFail; 2879 } 2880 } 2881 2882 E = Parser.getTok().getLoc(); 2883 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2884 2885 return MatchOperand_Success; 2886} 2887 2888/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2889/// of instructions. Legal values are: 2890/// ror #n 'n' in {0, 8, 16, 24} 2891ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2892parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2893 const AsmToken &Tok = Parser.getTok(); 2894 SMLoc S = Tok.getLoc(); 2895 if (Tok.isNot(AsmToken::Identifier)) 2896 return MatchOperand_NoMatch; 2897 StringRef ShiftName = Tok.getString(); 2898 if (ShiftName != "ror" && ShiftName != "ROR") 2899 return MatchOperand_NoMatch; 2900 Parser.Lex(); // Eat the operator. 2901 2902 // A '#' and a rotate amount. 2903 if (Parser.getTok().isNot(AsmToken::Hash)) { 2904 Error(Parser.getTok().getLoc(), "'#' expected"); 2905 return MatchOperand_ParseFail; 2906 } 2907 Parser.Lex(); // Eat hash token. 2908 2909 const MCExpr *ShiftAmount; 2910 SMLoc E = Parser.getTok().getLoc(); 2911 if (getParser().ParseExpression(ShiftAmount)) { 2912 Error(E, "malformed rotate expression"); 2913 return MatchOperand_ParseFail; 2914 } 2915 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2916 if (!CE) { 2917 Error(E, "rotate amount must be an immediate"); 2918 return MatchOperand_ParseFail; 2919 } 2920 2921 int64_t Val = CE->getValue(); 2922 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2923 // normally, zero is represented in asm by omitting the rotate operand 2924 // entirely. 
2925 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2926 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2927 return MatchOperand_ParseFail; 2928 } 2929 2930 E = Parser.getTok().getLoc(); 2931 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2932 2933 return MatchOperand_Success; 2934} 2935 2936ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2937parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2938 SMLoc S = Parser.getTok().getLoc(); 2939 // The bitfield descriptor is really two operands, the LSB and the width. 2940 if (Parser.getTok().isNot(AsmToken::Hash)) { 2941 Error(Parser.getTok().getLoc(), "'#' expected"); 2942 return MatchOperand_ParseFail; 2943 } 2944 Parser.Lex(); // Eat hash token. 2945 2946 const MCExpr *LSBExpr; 2947 SMLoc E = Parser.getTok().getLoc(); 2948 if (getParser().ParseExpression(LSBExpr)) { 2949 Error(E, "malformed immediate expression"); 2950 return MatchOperand_ParseFail; 2951 } 2952 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2953 if (!CE) { 2954 Error(E, "'lsb' operand must be an immediate"); 2955 return MatchOperand_ParseFail; 2956 } 2957 2958 int64_t LSB = CE->getValue(); 2959 // The LSB must be in the range [0,31] 2960 if (LSB < 0 || LSB > 31) { 2961 Error(E, "'lsb' operand must be in the range [0,31]"); 2962 return MatchOperand_ParseFail; 2963 } 2964 E = Parser.getTok().getLoc(); 2965 2966 // Expect another immediate operand. 2967 if (Parser.getTok().isNot(AsmToken::Comma)) { 2968 Error(Parser.getTok().getLoc(), "too few operands"); 2969 return MatchOperand_ParseFail; 2970 } 2971 Parser.Lex(); // Eat hash token. 2972 if (Parser.getTok().isNot(AsmToken::Hash)) { 2973 Error(Parser.getTok().getLoc(), "'#' expected"); 2974 return MatchOperand_ParseFail; 2975 } 2976 Parser.Lex(); // Eat hash token. 
2977 2978 const MCExpr *WidthExpr; 2979 if (getParser().ParseExpression(WidthExpr)) { 2980 Error(E, "malformed immediate expression"); 2981 return MatchOperand_ParseFail; 2982 } 2983 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2984 if (!CE) { 2985 Error(E, "'width' operand must be an immediate"); 2986 return MatchOperand_ParseFail; 2987 } 2988 2989 int64_t Width = CE->getValue(); 2990 // The LSB must be in the range [1,32-lsb] 2991 if (Width < 1 || Width > 32 - LSB) { 2992 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2993 return MatchOperand_ParseFail; 2994 } 2995 E = Parser.getTok().getLoc(); 2996 2997 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2998 2999 return MatchOperand_Success; 3000} 3001 3002ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3003parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3004 // Check for a post-index addressing register operand. Specifically: 3005 // postidx_reg := '+' register {, shift} 3006 // | '-' register {, shift} 3007 // | register {, shift} 3008 3009 // This method must return MatchOperand_NoMatch without consuming any tokens 3010 // in the case where there is no match, as other alternatives take other 3011 // parse methods. 3012 AsmToken Tok = Parser.getTok(); 3013 SMLoc S = Tok.getLoc(); 3014 bool haveEaten = false; 3015 bool isAdd = true; 3016 int Reg = -1; 3017 if (Tok.is(AsmToken::Plus)) { 3018 Parser.Lex(); // Eat the '+' token. 3019 haveEaten = true; 3020 } else if (Tok.is(AsmToken::Minus)) { 3021 Parser.Lex(); // Eat the '-' token. 
3022 isAdd = false; 3023 haveEaten = true; 3024 } 3025 if (Parser.getTok().is(AsmToken::Identifier)) 3026 Reg = tryParseRegister(); 3027 if (Reg == -1) { 3028 if (!haveEaten) 3029 return MatchOperand_NoMatch; 3030 Error(Parser.getTok().getLoc(), "register expected"); 3031 return MatchOperand_ParseFail; 3032 } 3033 SMLoc E = Parser.getTok().getLoc(); 3034 3035 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3036 unsigned ShiftImm = 0; 3037 if (Parser.getTok().is(AsmToken::Comma)) { 3038 Parser.Lex(); // Eat the ','. 3039 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3040 return MatchOperand_ParseFail; 3041 } 3042 3043 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3044 ShiftImm, S, E)); 3045 3046 return MatchOperand_Success; 3047} 3048 3049ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3050parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3051 // Check for a post-index addressing register operand. Specifically: 3052 // am3offset := '+' register 3053 // | '-' register 3054 // | register 3055 // | # imm 3056 // | # + imm 3057 // | # - imm 3058 3059 // This method must return MatchOperand_NoMatch without consuming any tokens 3060 // in the case where there is no match, as other alternatives take other 3061 // parse methods. 3062 AsmToken Tok = Parser.getTok(); 3063 SMLoc S = Tok.getLoc(); 3064 3065 // Do immediates first, as we always parse those if we have a '#'. 3066 if (Parser.getTok().is(AsmToken::Hash)) { 3067 Parser.Lex(); // Eat the '#'. 3068 // Explicitly look for a '-', as we need to encode negative zero 3069 // differently. 
3070 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3071 const MCExpr *Offset; 3072 if (getParser().ParseExpression(Offset)) 3073 return MatchOperand_ParseFail; 3074 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3075 if (!CE) { 3076 Error(S, "constant expression expected"); 3077 return MatchOperand_ParseFail; 3078 } 3079 SMLoc E = Tok.getLoc(); 3080 // Negative zero is encoded as the flag value INT32_MIN. 3081 int32_t Val = CE->getValue(); 3082 if (isNegative && Val == 0) 3083 Val = INT32_MIN; 3084 3085 Operands.push_back( 3086 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3087 3088 return MatchOperand_Success; 3089 } 3090 3091 3092 bool haveEaten = false; 3093 bool isAdd = true; 3094 int Reg = -1; 3095 if (Tok.is(AsmToken::Plus)) { 3096 Parser.Lex(); // Eat the '+' token. 3097 haveEaten = true; 3098 } else if (Tok.is(AsmToken::Minus)) { 3099 Parser.Lex(); // Eat the '-' token. 3100 isAdd = false; 3101 haveEaten = true; 3102 } 3103 if (Parser.getTok().is(AsmToken::Identifier)) 3104 Reg = tryParseRegister(); 3105 if (Reg == -1) { 3106 if (!haveEaten) 3107 return MatchOperand_NoMatch; 3108 Error(Parser.getTok().getLoc(), "register expected"); 3109 return MatchOperand_ParseFail; 3110 } 3111 SMLoc E = Parser.getTok().getLoc(); 3112 3113 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3114 0, S, E)); 3115 3116 return MatchOperand_Success; 3117} 3118 3119/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3120/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3121/// when they refer multiple MIOperands inside a single one. 3122bool ARMAsmParser:: 3123cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3124 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3125 // Rt, Rt2 3126 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3127 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3128 // Create a writeback register dummy placeholder. 
3129 Inst.addOperand(MCOperand::CreateReg(0)); 3130 // addr 3131 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3132 // pred 3133 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3134 return true; 3135} 3136 3137/// cvtT2StrdPre - Convert parsed operands to MCInst. 3138/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3139/// when they refer multiple MIOperands inside a single one. 3140bool ARMAsmParser:: 3141cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3142 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3143 // Create a writeback register dummy placeholder. 3144 Inst.addOperand(MCOperand::CreateReg(0)); 3145 // Rt, Rt2 3146 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3147 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3148 // addr 3149 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3150 // pred 3151 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3152 return true; 3153} 3154 3155/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3156/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3157/// when they refer multiple MIOperands inside a single one. 3158bool ARMAsmParser:: 3159cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3160 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3161 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3162 3163 // Create a writeback register dummy placeholder. 3164 Inst.addOperand(MCOperand::CreateImm(0)); 3165 3166 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3167 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3168 return true; 3169} 3170 3171/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3172/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3173/// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Unlike the other cvt* routines this one can fail: it returns false (after
/// emitting a diagnostic) when the register constraint is violated.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand. In the three-operand form, accept a match against either source
  // (the multiply is commutative).
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1);
  // If we have a three-operand form, use that, else the second source operand
  // is just the destination operand again.
  if (Operands.size() == 6)
    ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  else
    Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

/// cvtVLDwbFixed - Convert parsed operands for a fixed-increment writeback
/// VLD to MCInst.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVLDwbRegister - Convert parsed operands for a register-increment
/// writeback VLD to MCInst.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbFixed - Convert parsed operands for a fixed-increment writeback
/// VST to MCInst.
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbRegister - Convert parsed operands for a register-increment
/// writeback VST to MCInst.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// parseMemory - Parse an ARM memory expression ("[Rn]", "[Rn, :align]",
/// "[Rn, #imm]" or "[Rn, +/-Rm {, shift}]", optionally followed by '!').
/// Returns false on success, true on error (after emitting a diagnostic).
/// The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  if (Tok.is(AsmToken::RBrac)) {
    // Bare "[Rn]" form: no offset, no shift, no alignment.
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // The alignment is written in bits but stored in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    // Remember whether a '-' follows: "#-0" must be distinguished from "#0".
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// Returns true on error (after emitting a diagnostic, or silently for a
/// non-identifier token), false on success, writing the parsed shift kind
/// and amount into St and Amount.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  if (ShiftName == "lsl" || ShiftName == "LSL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
3692 // lsl, ror: 0 <= imm <= 31 3693 // lsr, asr: 0 <= imm <= 32 3694 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3695 if (!CE) 3696 return Error(Loc, "shift amount must be an immediate"); 3697 int64_t Imm = CE->getValue(); 3698 if (Imm < 0 || 3699 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3700 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3701 return Error(Loc, "immediate shift value out of range"); 3702 Amount = Imm; 3703 } 3704 3705 return false; 3706} 3707 3708/// parseFPImm - A floating point immediate expression operand. 3709ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3710parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3711 SMLoc S = Parser.getTok().getLoc(); 3712 3713 if (Parser.getTok().isNot(AsmToken::Hash)) 3714 return MatchOperand_NoMatch; 3715 3716 // Disambiguate the VMOV forms that can accept an FP immediate. 3717 // vmov.f32 <sreg>, #imm 3718 // vmov.f64 <dreg>, #imm 3719 // vmov.f32 <dreg>, #imm @ vector f32x2 3720 // vmov.f32 <qreg>, #imm @ vector f32x4 3721 // 3722 // There are also the NEON VMOV instructions which expect an 3723 // integer constant. Make sure we don't try to parse an FPImm 3724 // for these: 3725 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3726 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3727 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3728 TyOp->getToken() != ".f64")) 3729 return MatchOperand_NoMatch; 3730 3731 Parser.Lex(); // Eat the '#'. 3732 3733 // Handle negation, as that still comes through as a separate token. 3734 bool isNegative = false; 3735 if (Parser.getTok().is(AsmToken::Minus)) { 3736 isNegative = true; 3737 Parser.Lex(); 3738 } 3739 const AsmToken &Tok = Parser.getTok(); 3740 if (Tok.is(AsmToken::Real)) { 3741 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3742 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3743 // If we had a '-' in front, toggle the sign bit. 
3744 IntVal ^= (uint64_t)isNegative << 63; 3745 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3746 Parser.Lex(); // Eat the token. 3747 if (Val == -1) { 3748 TokError("floating point value out of range"); 3749 return MatchOperand_ParseFail; 3750 } 3751 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3752 return MatchOperand_Success; 3753 } 3754 if (Tok.is(AsmToken::Integer)) { 3755 int64_t Val = Tok.getIntVal(); 3756 Parser.Lex(); // Eat the token. 3757 if (Val > 255 || Val < 0) { 3758 TokError("encoded floating point value out of range"); 3759 return MatchOperand_ParseFail; 3760 } 3761 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3762 return MatchOperand_Success; 3763 } 3764 3765 TokError("invalid floating point immediate"); 3766 return MatchOperand_ParseFail; 3767} 3768/// Parse a arm instruction operand. For now this parses the operand regardless 3769/// of the mnemonic. 3770bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3771 StringRef Mnemonic) { 3772 SMLoc S, E; 3773 3774 // Check if the current operand has a custom associated parser, if so, try to 3775 // custom parse the operand, or fallback to the general approach. 3776 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3777 if (ResTy == MatchOperand_Success) 3778 return false; 3779 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3780 // there was a match, but an error occurred, in which case, just return that 3781 // the operand parsing failed. 3782 if (ResTy == MatchOperand_ParseFail) 3783 return true; 3784 3785 switch (getLexer().getKind()) { 3786 default: 3787 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3788 return true; 3789 case AsmToken::Identifier: { 3790 // If this is VMRS, check for the apsr_nzcv operand. 
3791 if (!tryParseRegisterWithWriteBack(Operands)) 3792 return false; 3793 int Res = tryParseShiftRegister(Operands); 3794 if (Res == 0) // success 3795 return false; 3796 else if (Res == -1) // irrecoverable error 3797 return true; 3798 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3799 S = Parser.getTok().getLoc(); 3800 Parser.Lex(); 3801 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3802 return false; 3803 } 3804 3805 // Fall though for the Identifier case that is not a register or a 3806 // special name. 3807 } 3808 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) 3809 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3810 case AsmToken::String: // quoted label names. 3811 case AsmToken::Dot: { // . as a branch target 3812 // This was not a register so parse other operands that start with an 3813 // identifier (like labels) as expressions and create them as immediates. 3814 const MCExpr *IdVal; 3815 S = Parser.getTok().getLoc(); 3816 if (getParser().ParseExpression(IdVal)) 3817 return true; 3818 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3819 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3820 return false; 3821 } 3822 case AsmToken::LBrac: 3823 return parseMemory(Operands); 3824 case AsmToken::LCurly: 3825 return parseRegisterList(Operands); 3826 case AsmToken::Hash: { 3827 // #42 -> immediate. 
3828 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3829 S = Parser.getTok().getLoc(); 3830 Parser.Lex(); 3831 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3832 const MCExpr *ImmVal; 3833 if (getParser().ParseExpression(ImmVal)) 3834 return true; 3835 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3836 if (CE) { 3837 int32_t Val = CE->getValue(); 3838 if (isNegative && Val == 0) 3839 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3840 } 3841 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3842 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3843 return false; 3844 } 3845 case AsmToken::Colon: { 3846 // ":lower16:" and ":upper16:" expression prefixes 3847 // FIXME: Check it's an expression prefix, 3848 // e.g. (FOO - :lower16:BAR) isn't legal. 3849 ARMMCExpr::VariantKind RefKind; 3850 if (parsePrefix(RefKind)) 3851 return true; 3852 3853 const MCExpr *SubExprVal; 3854 if (getParser().ParseExpression(SubExprVal)) 3855 return true; 3856 3857 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3858 getContext()); 3859 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3860 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3861 return false; 3862 } 3863 } 3864} 3865 3866// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3867// :lower16: and :upper16:. 
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  // Default to no modifier; only overwritten on a recognized prefix.
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex(); // Eat the identifier.

  // The prefix must be terminated by a second ':'.
  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// Returns the canonical (stripped) mnemonic. Outputs:
//   PredicationCode - ARMCC condition split off the end (ARMCC::AL if none).
//   CarrySetting    - true if a trailing 's' flag-setting suffix was split.
//   ProcessorIMod   - interrupt-mode code glued onto "cps" (0 if none).
//   ITMask          - the "t"/"e" condition mask glued onto "it" (untouched
//                     if the mnemonic is not "it...").
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
// NOTE: The order of the checks below matters; each strips a suffix the
// later checks must not see again.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  // These full mnemonics happen to end in two letters that look like a
  // condition code (e.g. "teq" ends in "eq"), so bail before the suffix
  // check below mis-splits them.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
3998void ARMAsmParser:: 3999getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4000 bool &CanAcceptPredicationCode) { 4001 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4002 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4003 Mnemonic == "add" || Mnemonic == "adc" || 4004 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4005 Mnemonic == "orr" || Mnemonic == "mvn" || 4006 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4007 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4008 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4009 Mnemonic == "mla" || Mnemonic == "smlal" || 4010 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4011 CanAcceptCarrySet = true; 4012 } else 4013 CanAcceptCarrySet = false; 4014 4015 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4016 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4017 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4018 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4019 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4020 (Mnemonic == "clrex" && !isThumb()) || 4021 (Mnemonic == "nop" && isThumbOne()) || 4022 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4023 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4024 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4025 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4026 !isThumb()) || 4027 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4028 CanAcceptPredicationCode = false; 4029 } else 4030 CanAcceptPredicationCode = true; 4031 4032 if (isThumb()) { 4033 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4034 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4035 CanAcceptPredicationCode = false; 4036 } 4037} 4038 4039bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4040 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4041 // FIXME: This is all horribly hacky. We really need a better way to deal 4042 // with optional operands like this in the matcher table. 4043 4044 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4045 // another does not. Specifically, the MOVW instruction does not. So we 4046 // special case it here and remove the defaulted (non-setting) cc_out 4047 // operand if that's the instruction we're trying to match. 4048 // 4049 // We do this as post-processing of the explicit operands rather than just 4050 // conditionally adding the cc_out in the first place because we need 4051 // to check the type of the parsed immediate operand. 4052 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4053 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4054 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4055 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4056 return true; 4057 4058 // Register-register 'add' for thumb does not have a cc_out operand 4059 // when there are only two register operands. 4060 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4061 static_cast<ARMOperand*>(Operands[3])->isReg() && 4062 static_cast<ARMOperand*>(Operands[4])->isReg() && 4063 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4064 return true; 4065 // Register-register 'add' for thumb does not have a cc_out operand 4066 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4067 // have to check the immediate range here since Thumb2 has a variant 4068 // that can handle a different range and has a cc_out operand. 
4069 if (((isThumb() && Mnemonic == "add") || 4070 (isThumbTwo() && Mnemonic == "sub")) && 4071 Operands.size() == 6 && 4072 static_cast<ARMOperand*>(Operands[3])->isReg() && 4073 static_cast<ARMOperand*>(Operands[4])->isReg() && 4074 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4075 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4076 (static_cast<ARMOperand*>(Operands[5])->isReg() || 4077 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4078 return true; 4079 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4080 // imm0_4095 variant. That's the least-preferred variant when 4081 // selecting via the generic "add" mnemonic, so to know that we 4082 // should remove the cc_out operand, we have to explicitly check that 4083 // it's not one of the other variants. Ugh. 4084 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4085 Operands.size() == 6 && 4086 static_cast<ARMOperand*>(Operands[3])->isReg() && 4087 static_cast<ARMOperand*>(Operands[4])->isReg() && 4088 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4089 // Nest conditions rather than one big 'if' statement for readability. 4090 // 4091 // If either register is a high reg, it's either one of the SP 4092 // variants (handled above) or a 32-bit encoding, so we just 4093 // check against T3. 4094 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4095 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4096 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4097 return false; 4098 // If both registers are low, we're in an IT block, and the immediate is 4099 // in range, we should use encoding T1 instead, which has a cc_out. 
4100 if (inITBlock() && 4101 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4102 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4103 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4104 return false; 4105 4106 // Otherwise, we use encoding T4, which does not have a cc_out 4107 // operand. 4108 return true; 4109 } 4110 4111 // The thumb2 multiply instruction doesn't have a CCOut register, so 4112 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4113 // use the 16-bit encoding or not. 4114 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4115 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4116 static_cast<ARMOperand*>(Operands[3])->isReg() && 4117 static_cast<ARMOperand*>(Operands[4])->isReg() && 4118 static_cast<ARMOperand*>(Operands[5])->isReg() && 4119 // If the registers aren't low regs, the destination reg isn't the 4120 // same as one of the source regs, or the cc_out operand is zero 4121 // outside of an IT block, we have to use the 32-bit encoding, so 4122 // remove the cc_out operand. 4123 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4124 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4125 !inITBlock() || 4126 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4127 static_cast<ARMOperand*>(Operands[5])->getReg() && 4128 static_cast<ARMOperand*>(Operands[3])->getReg() != 4129 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4130 return true; 4131 4132 4133 4134 // Register-register 'add/sub' for thumb does not have a cc_out operand 4135 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4136 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4137 // right, this will result in better diagnostics (which operand is off) 4138 // anyway. 
4139 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4140 (Operands.size() == 5 || Operands.size() == 6) && 4141 static_cast<ARMOperand*>(Operands[3])->isReg() && 4142 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4143 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4144 return true; 4145 4146 return false; 4147} 4148 4149/// Parse an arm instruction mnemonic followed by its operands. 4150bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, 4151 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4152 // Create the leading tokens for the mnemonic, split by '.' characters. 4153 size_t Start = 0, Next = Name.find('.'); 4154 StringRef Mnemonic = Name.slice(Start, Next); 4155 4156 // Split out the predication code and carry setting flag from the mnemonic. 4157 unsigned PredicationCode; 4158 unsigned ProcessorIMod; 4159 bool CarrySetting; 4160 StringRef ITMask; 4161 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 4162 ProcessorIMod, ITMask); 4163 4164 // In Thumb1, only the branch (B) instruction can be predicated. 4165 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 4166 Parser.EatToEndOfStatement(); 4167 return Error(NameLoc, "conditional execution not supported in Thumb1"); 4168 } 4169 4170 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 4171 4172 // Handle the IT instruction ITMask. Convert it to a bitmask. This 4173 // is the mask as it will be for the IT encoding if the conditional 4174 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 4175 // where the conditional bit0 is zero, the instruction post-processing 4176 // will adjust the mask accordingly. 
4177 if (Mnemonic == "it") { 4178 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 4179 if (ITMask.size() > 3) { 4180 Parser.EatToEndOfStatement(); 4181 return Error(Loc, "too many conditions on IT instruction"); 4182 } 4183 unsigned Mask = 8; 4184 for (unsigned i = ITMask.size(); i != 0; --i) { 4185 char pos = ITMask[i - 1]; 4186 if (pos != 't' && pos != 'e') { 4187 Parser.EatToEndOfStatement(); 4188 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 4189 } 4190 Mask >>= 1; 4191 if (ITMask[i - 1] == 't') 4192 Mask |= 8; 4193 } 4194 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 4195 } 4196 4197 // FIXME: This is all a pretty gross hack. We should automatically handle 4198 // optional operands like this via tblgen. 4199 4200 // Next, add the CCOut and ConditionCode operands, if needed. 4201 // 4202 // For mnemonics which can ever incorporate a carry setting bit or predication 4203 // code, our matching model involves us always generating CCOut and 4204 // ConditionCode operands to match the mnemonic "as written" and then we let 4205 // the matcher deal with finding the right instruction or generating an 4206 // appropriate error. 4207 bool CanAcceptCarrySet, CanAcceptPredicationCode; 4208 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode); 4209 4210 // If we had a carry-set on an instruction that can't do that, issue an 4211 // error. 4212 if (!CanAcceptCarrySet && CarrySetting) { 4213 Parser.EatToEndOfStatement(); 4214 return Error(NameLoc, "instruction '" + Mnemonic + 4215 "' can not set flags, but 's' suffix specified"); 4216 } 4217 // If we had a predication code on an instruction that can't do that, issue an 4218 // error. 
4219 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 4220 Parser.EatToEndOfStatement(); 4221 return Error(NameLoc, "instruction '" + Mnemonic + 4222 "' is not predicable, but condition code specified"); 4223 } 4224 4225 // Add the carry setting operand, if necessary. 4226 if (CanAcceptCarrySet) { 4227 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 4228 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 4229 Loc)); 4230 } 4231 4232 // Add the predication code operand, if necessary. 4233 if (CanAcceptPredicationCode) { 4234 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 4235 CarrySetting); 4236 Operands.push_back(ARMOperand::CreateCondCode( 4237 ARMCC::CondCodes(PredicationCode), Loc)); 4238 } 4239 4240 // Add the processor imod operand, if necessary. 4241 if (ProcessorIMod) { 4242 Operands.push_back(ARMOperand::CreateImm( 4243 MCConstantExpr::Create(ProcessorIMod, getContext()), 4244 NameLoc, NameLoc)); 4245 } 4246 4247 // Add the remaining tokens in the mnemonic. 4248 while (Next != StringRef::npos) { 4249 Start = Next; 4250 Next = Name.find('.', Start + 1); 4251 StringRef ExtraToken = Name.slice(Start, Next); 4252 4253 // For now, we're only parsing Thumb1 (for the most part), so 4254 // just ignore ".n" qualifiers. We'll use them to restrict 4255 // matching when we do Thumb2. 4256 if (ExtraToken != ".n") { 4257 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 4258 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 4259 } 4260 } 4261 4262 // Read the remaining operands. 4263 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4264 // Read the first operand. 4265 if (parseOperand(Operands, Mnemonic)) { 4266 Parser.EatToEndOfStatement(); 4267 return true; 4268 } 4269 4270 while (getLexer().is(AsmToken::Comma)) { 4271 Parser.Lex(); // Eat the comma. 4272 4273 // Parse and remember the operand. 
4274 if (parseOperand(Operands, Mnemonic)) { 4275 Parser.EatToEndOfStatement(); 4276 return true; 4277 } 4278 } 4279 } 4280 4281 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4282 SMLoc Loc = getLexer().getLoc(); 4283 Parser.EatToEndOfStatement(); 4284 return Error(Loc, "unexpected token in argument list"); 4285 } 4286 4287 Parser.Lex(); // Consume the EndOfStatement 4288 4289 // Some instructions, mostly Thumb, have forms for the same mnemonic that 4290 // do and don't have a cc_out optional-def operand. With some spot-checks 4291 // of the operand list, we can figure out which variant we're trying to 4292 // parse and adjust accordingly before actually matching. We shouldn't ever 4293 // try to remove a cc_out operand that was explicitly set on the the 4294 // mnemonic, of course (CarrySetting == true). Reason number #317 the 4295 // table driven matcher doesn't fit well with the ARM instruction set. 4296 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { 4297 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4298 Operands.erase(Operands.begin() + 1); 4299 delete Op; 4300 } 4301 4302 // ARM mode 'blx' need special handling, as the register operand version 4303 // is predicable, but the label operand version is not. So, we can't rely 4304 // on the Mnemonic based checking to correctly figure out when to put 4305 // a k_CondCode operand in the list. If we're trying to match the label 4306 // version, remove the k_CondCode operand here. 4307 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && 4308 static_cast<ARMOperand*>(Operands[2])->isImm()) { 4309 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4310 Operands.erase(Operands.begin() + 1); 4311 delete Op; 4312 } 4313 4314 // The vector-compare-to-zero instructions have a literal token "#0" at 4315 // the end that comes to here as an immediate operand. Convert it to a 4316 // token to play nicely with the matcher. 
4317 if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" || 4318 Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 && 4319 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4320 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4321 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4322 if (CE && CE->getValue() == 0) { 4323 Operands.erase(Operands.begin() + 5); 4324 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4325 delete Op; 4326 } 4327 } 4328 // VCMP{E} does the same thing, but with a different operand count. 4329 if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 && 4330 static_cast<ARMOperand*>(Operands[4])->isImm()) { 4331 ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]); 4332 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4333 if (CE && CE->getValue() == 0) { 4334 Operands.erase(Operands.begin() + 4); 4335 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4336 delete Op; 4337 } 4338 } 4339 // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the 4340 // end. Convert it to a token here. 4341 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 && 4342 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4343 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4344 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4345 if (CE && CE->getValue() == 0) { 4346 Operands.erase(Operands.begin() + 5); 4347 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4348 delete Op; 4349 } 4350 } 4351 4352 return false; 4353} 4354 4355// Validate context-sensitive operand constraints. 4356 4357// return 'true' if register list contains non-low GPR registers, 4358// 'false' otherwise. If Reg is in the register list or is HiReg, set 4359// 'containsReg' to true. 
4360static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4361 unsigned HiReg, bool &containsReg) { 4362 containsReg = false; 4363 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4364 unsigned OpReg = Inst.getOperand(i).getReg(); 4365 if (OpReg == Reg) 4366 containsReg = true; 4367 // Anything other than a low register isn't legal here. 4368 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4369 return true; 4370 } 4371 return false; 4372} 4373 4374// Check if the specified regisgter is in the register list of the inst, 4375// starting at the indicated operand number. 4376static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4377 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4378 unsigned OpReg = Inst.getOperand(i).getReg(); 4379 if (OpReg == Reg) 4380 return true; 4381 } 4382 return false; 4383} 4384 4385// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4386// the ARMInsts array) instead. Getting that here requires awkward 4387// API changes, though. Better way? 4388namespace llvm { 4389extern const MCInstrDesc ARMInsts[]; 4390} 4391static const MCInstrDesc &getInstDesc(unsigned Opcode) { 4392 return ARMInsts[Opcode]; 4393} 4394 4395// FIXME: We would really like to be able to tablegen'erate this. 4396bool ARMAsmParser:: 4397validateInstruction(MCInst &Inst, 4398 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4399 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4400 SMLoc Loc = Operands[0]->getStartLoc(); 4401 // Check the IT block state first. 4402 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4403 // being allowed in IT blocks, but not being predicable. It just always 4404 // executes. 
  // Inside an active IT block, every instruction (except tBKPT, handled
  // above) must be predicable and must carry the condition the IT
  // instruction dictates for its position.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      // Mask bit for this slot: '1' means the IT condition applies, '0'
      // means its inverse (the 'else' case) — see the ITState.Mask comment.
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. (Rt is operand 1 here, not 0, because these
    // forms have a writeback destination first.)
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    // Writeback form: the base register must not also appear in the list.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

// Post-match fixups: rewrite complex aliases into their real encodings and
// pick narrow/wide forms where the matcher's choice isn't final.
void ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  // Handle the MOV complex aliases.
  case ARM::ASRi: {
    // Rewrite the ASR alias as a MOVsi with an ASR shift operand.
    // The matched immediate is stored biased down by one, hence the +1
    // here — TODO confirm against the ASRi operand definition.
    unsigned Amt = Inst.getOperand(2).getImm() + 1;
    unsigned ShiftOp = ARM_AM::getSORegOpc(ARM_AM::asr, Amt);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsi);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(ShiftOp)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    break;
  }
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tADDi3);
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tSUBi3);
    break;
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::tBcc);
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock())
      Inst.setOpcode(ARM::t2B);
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL)
      Inst.setOpcode(ARM::tB);
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
4660 assert (isThumbTwo()); 4661 Inst.setOpcode(ARM::t2STMIA_UPD); 4662 } 4663 break; 4664 } 4665 case ARM::t2MOVi: { 4666 // If we can use the 16-bit encoding and the user didn't explicitly 4667 // request the 32-bit variant, transform it here. 4668 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4669 Inst.getOperand(1).getImm() <= 255 && 4670 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4671 Inst.getOperand(4).getReg() == ARM::CPSR) || 4672 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4673 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4674 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4675 // The operands aren't in the same order for tMOVi8... 4676 MCInst TmpInst; 4677 TmpInst.setOpcode(ARM::tMOVi8); 4678 TmpInst.addOperand(Inst.getOperand(0)); 4679 TmpInst.addOperand(Inst.getOperand(4)); 4680 TmpInst.addOperand(Inst.getOperand(1)); 4681 TmpInst.addOperand(Inst.getOperand(2)); 4682 TmpInst.addOperand(Inst.getOperand(3)); 4683 Inst = TmpInst; 4684 } 4685 break; 4686 } 4687 case ARM::t2MOVr: { 4688 // If we can use the 16-bit encoding and the user didn't explicitly 4689 // request the 32-bit variant, transform it here. 4690 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4691 isARMLowRegister(Inst.getOperand(1).getReg()) && 4692 Inst.getOperand(2).getImm() == ARMCC::AL && 4693 Inst.getOperand(4).getReg() == ARM::CPSR && 4694 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4695 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4696 // The operands aren't the same for tMOV[S]r... (no cc_out) 4697 MCInst TmpInst; 4698 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? 
ARM::tMOVSr : ARM::tMOVr); 4699 TmpInst.addOperand(Inst.getOperand(0)); 4700 TmpInst.addOperand(Inst.getOperand(1)); 4701 TmpInst.addOperand(Inst.getOperand(2)); 4702 TmpInst.addOperand(Inst.getOperand(3)); 4703 Inst = TmpInst; 4704 } 4705 break; 4706 } 4707 case ARM::t2SXTH: 4708 case ARM::t2SXTB: 4709 case ARM::t2UXTH: 4710 case ARM::t2UXTB: { 4711 // If we can use the 16-bit encoding and the user didn't explicitly 4712 // request the 32-bit variant, transform it here. 4713 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4714 isARMLowRegister(Inst.getOperand(1).getReg()) && 4715 Inst.getOperand(2).getImm() == 0 && 4716 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4717 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4718 unsigned NewOpc; 4719 switch (Inst.getOpcode()) { 4720 default: llvm_unreachable("Illegal opcode!"); 4721 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4722 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4723 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4724 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4725 } 4726 // The operands aren't the same for thumb1 (no rotate operand). 4727 MCInst TmpInst; 4728 TmpInst.setOpcode(NewOpc); 4729 TmpInst.addOperand(Inst.getOperand(0)); 4730 TmpInst.addOperand(Inst.getOperand(1)); 4731 TmpInst.addOperand(Inst.getOperand(3)); 4732 TmpInst.addOperand(Inst.getOperand(4)); 4733 Inst = TmpInst; 4734 } 4735 break; 4736 } 4737 case ARM::t2IT: { 4738 // The mask bits for all but the first condition are represented as 4739 // the low bit of the condition code value implies 't'. We currently 4740 // always have 1 implies 't', so XOR toggle the bits if the low bit 4741 // of the condition code is zero. 
The encoding also expects the low 4742 // bit of the condition to be encoded as bit 4 of the mask operand, 4743 // so mask that in if needed 4744 MCOperand &MO = Inst.getOperand(1); 4745 unsigned Mask = MO.getImm(); 4746 unsigned OrigMask = Mask; 4747 unsigned TZ = CountTrailingZeros_32(Mask); 4748 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4749 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4750 for (unsigned i = 3; i != TZ; --i) 4751 Mask ^= 1 << i; 4752 } else 4753 Mask |= 0x10; 4754 MO.setImm(Mask); 4755 4756 // Set up the IT block state according to the IT instruction we just 4757 // matched. 4758 assert(!inITBlock() && "nested IT blocks?!"); 4759 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4760 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4761 ITState.CurPosition = 0; 4762 ITState.FirstCond = true; 4763 break; 4764 } 4765 } 4766} 4767 4768unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4769 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4770 // suffix depending on whether they're in an IT block or not. 4771 unsigned Opc = Inst.getOpcode(); 4772 const MCInstrDesc &MCID = getInstDesc(Opc); 4773 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4774 assert(MCID.hasOptionalDef() && 4775 "optionally flag setting instruction missing optional def operand"); 4776 assert(MCID.NumOperands == Inst.getNumOperands() && 4777 "operand count mismatch!"); 4778 // Find the optional-def operand (cc_out). 4779 unsigned OpNo; 4780 for (OpNo = 0; 4781 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4782 ++OpNo) 4783 ; 4784 // If we're parsing Thumb1, reject it completely. 4785 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4786 return Match_MnemonicFail; 4787 // If we're parsing Thumb2, which form is legal depends on whether we're 4788 // in an IT block. 
    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
        !inITBlock())
      return Match_RequiresITBlock;
    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
        inITBlock())
      return Match_RequiresNotITBlock;
  }
  // Some high-register supporting Thumb1 encodings only allow both registers
  // to be from r0-r7 when in Thumb2.
  else if (Opc == ARM::tADDhirr && isThumbOne() &&
           isARMLowRegister(Inst.getOperand(1).getReg()) &&
           isARMLowRegister(Inst.getOperand(2).getReg()))
    return Match_RequiresThumb2;
  // Others only require ARMv6 or later.
  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
           isARMLowRegister(Inst.getOperand(0).getReg()) &&
           isARMLowRegister(Inst.getOperand(1).getReg()))
    return Match_RequiresV6;
  return Match_Success;
}

// Match the parsed operand list against the instruction tables, run the
// context-sensitive checks and fixups, and emit the instruction on success.
// Returns true if a diagnostic was emitted.
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out) {
  MCInst Inst;
  unsigned ErrorInfo;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected.
    processInstruction(Inst, Operands);

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // ErrorInfo is the index of the offending operand, or ~0U if unknown.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  return true;
}

/// parseDirectiveWord
/// ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  // Consume the end-of-statement token.
  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
/// ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // TODO: set thumb mode
  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveThumbFunc
/// ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has the function name after the .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getString();
    Parser.Lex(); // Consume the identifier token.
4944 } 4945 4946 if (getLexer().isNot(AsmToken::EndOfStatement)) 4947 return Error(L, "unexpected token in directive"); 4948 Parser.Lex(); 4949 4950 // FIXME: assuming function name will be the line following .thumb_func 4951 if (!isMachO) { 4952 Name = Parser.getTok().getString(); 4953 } 4954 4955 // Mark symbol as a thumb symbol. 4956 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4957 getParser().getStreamer().EmitThumbFunc(Func); 4958 return false; 4959} 4960 4961/// parseDirectiveSyntax 4962/// ::= .syntax unified | divided 4963bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4964 const AsmToken &Tok = Parser.getTok(); 4965 if (Tok.isNot(AsmToken::Identifier)) 4966 return Error(L, "unexpected token in .syntax directive"); 4967 StringRef Mode = Tok.getString(); 4968 if (Mode == "unified" || Mode == "UNIFIED") 4969 Parser.Lex(); 4970 else if (Mode == "divided" || Mode == "DIVIDED") 4971 return Error(L, "'.syntax divided' arm asssembly not supported"); 4972 else 4973 return Error(L, "unrecognized syntax mode in .syntax directive"); 4974 4975 if (getLexer().isNot(AsmToken::EndOfStatement)) 4976 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4977 Parser.Lex(); 4978 4979 // TODO tell the MC streamer the mode 4980 // getParser().getStreamer().Emit???(); 4981 return false; 4982} 4983 4984/// parseDirectiveCode 4985/// ::= .code 16 | 32 4986bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4987 const AsmToken &Tok = Parser.getTok(); 4988 if (Tok.isNot(AsmToken::Integer)) 4989 return Error(L, "unexpected token in .code directive"); 4990 int64_t Val = Parser.getTok().getIntVal(); 4991 if (Val == 16) 4992 Parser.Lex(); 4993 else if (Val == 32) 4994 Parser.Lex(); 4995 else 4996 return Error(L, "invalid operand to .code directive"); 4997 4998 if (getLexer().isNot(AsmToken::EndOfStatement)) 4999 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5000 Parser.Lex(); 5001 5002 if (Val == 16) { 5003 if 
(!isThumb()) 5004 SwitchMode(); 5005 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5006 } else { 5007 if (isThumb()) 5008 SwitchMode(); 5009 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5010 } 5011 5012 return false; 5013} 5014 5015extern "C" void LLVMInitializeARMAsmLexer(); 5016 5017/// Force static initialization. 5018extern "C" void LLVMInitializeARMAsmParser() { 5019 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 5020 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 5021 LLVMInitializeARMAsmLexer(); 5022} 5023 5024#define GET_REGISTER_MATCHER 5025#define GET_MATCHER_IMPLEMENTATION 5026#include "ARMGenAsmMatcher.inc" 5027