// ARMAsmParser.cpp -- revision c3937b97c00a857dff3528895e71ecfbc7ff3a28
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringSwitch.h" 34#include "llvm/ADT/Twine.h" 35 36using namespace llvm; 37 38namespace { 39 40class ARMOperand; 41 42class ARMAsmParser : public MCTargetAsmParser { 43 MCSubtargetInfo &STI; 44 MCAsmParser &Parser; 45 46 struct { 47 ARMCC::CondCodes Cond; // Condition for IT block. 48 unsigned Mask:4; // Condition mask for instructions. 49 // Starting at first 1 (from lsb). 50 // '1' condition as indicated in IT. 51 // '0' inverse of condition (else). 52 // Count of instructions in IT block is 53 // 4 - trailingzeroes(mask) 54 55 bool FirstCond; // Explicit flag for when we're parsing the 56 // First instruction in the IT block. It's 57 // implied in the mask, so needs special 58 // handling. 
59 60 unsigned CurPosition; // Current position in parsing of IT 61 // block. In range [0,3]. Initialized 62 // according to count of instructions in block. 63 // ~0U if no active IT block. 64 } ITState; 65 bool inITBlock() { return ITState.CurPosition != ~0U;} 66 void forwardITPosition() { 67 if (!inITBlock()) return; 68 // Move to the next instruction in the IT block, if there is one. If not, 69 // mark the block as done. 70 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 71 if (++ITState.CurPosition == 5 - TZ) 72 ITState.CurPosition = ~0U; // Done with the IT block after this. 73 } 74 75 76 MCAsmParser &getParser() const { return Parser; } 77 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 78 79 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 80 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 81 82 int tryParseRegister(); 83 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 84 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 85 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 86 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 87 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 88 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 89 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 90 unsigned &ShiftAmount); 91 bool parseDirectiveWord(unsigned Size, SMLoc L); 92 bool parseDirectiveThumb(SMLoc L); 93 bool parseDirectiveThumbFunc(SMLoc L); 94 bool parseDirectiveCode(SMLoc L); 95 bool parseDirectiveSyntax(SMLoc L); 96 97 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 98 bool &CarrySetting, unsigned &ProcessorIMod, 99 StringRef &ITMask); 100 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 101 bool &CanAcceptPredicationCode); 102 103 bool isThumb() const { 104 // FIXME: Can tablegen auto-generate this? 
105 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 106 } 107 bool isThumbOne() const { 108 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 109 } 110 bool isThumbTwo() const { 111 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 112 } 113 bool hasV6Ops() const { 114 return STI.getFeatureBits() & ARM::HasV6Ops; 115 } 116 bool hasV7Ops() const { 117 return STI.getFeatureBits() & ARM::HasV7Ops; 118 } 119 void SwitchMode() { 120 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 121 setAvailableFeatures(FB); 122 } 123 bool isMClass() const { 124 return STI.getFeatureBits() & ARM::FeatureMClass; 125 } 126 127 /// @name Auto-generated Match Functions 128 /// { 129 130#define GET_ASSEMBLER_HEADER 131#include "ARMGenAsmMatcher.inc" 132 133 /// } 134 135 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 136 OperandMatchResultTy parseCoprocNumOperand( 137 SmallVectorImpl<MCParsedAsmOperand*>&); 138 OperandMatchResultTy parseCoprocRegOperand( 139 SmallVectorImpl<MCParsedAsmOperand*>&); 140 OperandMatchResultTy parseCoprocOptionOperand( 141 SmallVectorImpl<MCParsedAsmOperand*>&); 142 OperandMatchResultTy parseMemBarrierOptOperand( 143 SmallVectorImpl<MCParsedAsmOperand*>&); 144 OperandMatchResultTy parseProcIFlagsOperand( 145 SmallVectorImpl<MCParsedAsmOperand*>&); 146 OperandMatchResultTy parseMSRMaskOperand( 147 SmallVectorImpl<MCParsedAsmOperand*>&); 148 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 149 StringRef Op, int Low, int High); 150 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 151 return parsePKHImm(O, "lsl", 0, 31); 152 } 153 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 154 return parsePKHImm(O, "asr", 1, 32); 155 } 156 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 157 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 158 
OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 159 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 160 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 161 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 162 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 163 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 164 165 // Asm Match Converter Methods 166 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 167 const SmallVectorImpl<MCParsedAsmOperand*> &); 168 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 169 const SmallVectorImpl<MCParsedAsmOperand*> &); 170 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 171 const SmallVectorImpl<MCParsedAsmOperand*> &); 172 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 173 const SmallVectorImpl<MCParsedAsmOperand*> &); 174 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 175 const SmallVectorImpl<MCParsedAsmOperand*> &); 176 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 177 const SmallVectorImpl<MCParsedAsmOperand*> &); 178 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 179 const SmallVectorImpl<MCParsedAsmOperand*> &); 180 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 181 const SmallVectorImpl<MCParsedAsmOperand*> &); 182 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 183 const SmallVectorImpl<MCParsedAsmOperand*> &); 184 bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 185 const SmallVectorImpl<MCParsedAsmOperand*> &); 186 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 187 const SmallVectorImpl<MCParsedAsmOperand*> &); 188 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 189 const SmallVectorImpl<MCParsedAsmOperand*> &); 190 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 191 const 
SmallVectorImpl<MCParsedAsmOperand*> &); 192 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 193 const SmallVectorImpl<MCParsedAsmOperand*> &); 194 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 195 const SmallVectorImpl<MCParsedAsmOperand*> &); 196 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 197 const SmallVectorImpl<MCParsedAsmOperand*> &); 198 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 199 const SmallVectorImpl<MCParsedAsmOperand*> &); 200 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 201 const SmallVectorImpl<MCParsedAsmOperand*> &); 202 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 203 const SmallVectorImpl<MCParsedAsmOperand*> &); 204 bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 205 const SmallVectorImpl<MCParsedAsmOperand*> &); 206 bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 207 const SmallVectorImpl<MCParsedAsmOperand*> &); 208 209 bool validateInstruction(MCInst &Inst, 210 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 211 bool processInstruction(MCInst &Inst, 212 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 213 bool shouldOmitCCOutOperand(StringRef Mnemonic, 214 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 215 216public: 217 enum ARMMatchResultTy { 218 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 219 Match_RequiresNotITBlock, 220 Match_RequiresV6, 221 Match_RequiresThumb2 222 }; 223 224 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 225 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 226 MCAsmParserExtension::Initialize(_Parser); 227 228 // Initialize the set of available features. 229 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 230 231 // Not in an ITBlock to start with. 
232 ITState.CurPosition = ~0U; 233 } 234 235 // Implementation of the MCTargetAsmParser interface: 236 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 237 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 238 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 239 bool ParseDirective(AsmToken DirectiveID); 240 241 unsigned checkTargetMatchPredicate(MCInst &Inst); 242 243 bool MatchAndEmitInstruction(SMLoc IDLoc, 244 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 245 MCStreamer &Out); 246}; 247} // end anonymous namespace 248 249namespace { 250 251/// ARMOperand - Instances of this class represent a parsed ARM machine 252/// instruction. 253class ARMOperand : public MCParsedAsmOperand { 254 enum KindTy { 255 k_CondCode, 256 k_CCOut, 257 k_ITCondMask, 258 k_CoprocNum, 259 k_CoprocReg, 260 k_CoprocOption, 261 k_Immediate, 262 k_FPImmediate, 263 k_MemBarrierOpt, 264 k_Memory, 265 k_PostIndexRegister, 266 k_MSRMask, 267 k_ProcIFlags, 268 k_VectorIndex, 269 k_Register, 270 k_RegisterList, 271 k_DPRRegisterList, 272 k_SPRRegisterList, 273 k_VectorList, 274 k_ShiftedRegister, 275 k_ShiftedImmediate, 276 k_ShifterImmediate, 277 k_RotateImmediate, 278 k_BitfieldDescriptor, 279 k_Token 280 } Kind; 281 282 SMLoc StartLoc, EndLoc; 283 SmallVector<unsigned, 8> Registers; 284 285 union { 286 struct { 287 ARMCC::CondCodes Val; 288 } CC; 289 290 struct { 291 unsigned Val; 292 } Cop; 293 294 struct { 295 unsigned Val; 296 } CoprocOption; 297 298 struct { 299 unsigned Mask:4; 300 } ITMask; 301 302 struct { 303 ARM_MB::MemBOpt Val; 304 } MBOpt; 305 306 struct { 307 ARM_PROC::IFlags Val; 308 } IFlags; 309 310 struct { 311 unsigned Val; 312 } MMask; 313 314 struct { 315 const char *Data; 316 unsigned Length; 317 } Tok; 318 319 struct { 320 unsigned RegNum; 321 } Reg; 322 323 // A vector register list is a sequential list of 1 to 4 registers. 
324 struct { 325 unsigned RegNum; 326 unsigned Count; 327 } VectorList; 328 329 struct { 330 unsigned Val; 331 } VectorIndex; 332 333 struct { 334 const MCExpr *Val; 335 } Imm; 336 337 struct { 338 unsigned Val; // encoded 8-bit representation 339 } FPImm; 340 341 /// Combined record for all forms of ARM address expressions. 342 struct { 343 unsigned BaseRegNum; 344 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 345 // was specified. 346 const MCConstantExpr *OffsetImm; // Offset immediate value 347 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 348 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 349 unsigned ShiftImm; // shift for OffsetReg. 350 unsigned Alignment; // 0 = no alignment specified 351 // n = alignment in bytes (8, 16, or 32) 352 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 353 } Memory; 354 355 struct { 356 unsigned RegNum; 357 bool isAdd; 358 ARM_AM::ShiftOpc ShiftTy; 359 unsigned ShiftImm; 360 } PostIdxReg; 361 362 struct { 363 bool isASR; 364 unsigned Imm; 365 } ShifterImm; 366 struct { 367 ARM_AM::ShiftOpc ShiftTy; 368 unsigned SrcReg; 369 unsigned ShiftReg; 370 unsigned ShiftImm; 371 } RegShiftedReg; 372 struct { 373 ARM_AM::ShiftOpc ShiftTy; 374 unsigned SrcReg; 375 unsigned ShiftImm; 376 } RegShiftedImm; 377 struct { 378 unsigned Imm; 379 } RotImm; 380 struct { 381 unsigned LSB; 382 unsigned Width; 383 } Bitfield; 384 }; 385 386 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 387public: 388 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 389 Kind = o.Kind; 390 StartLoc = o.StartLoc; 391 EndLoc = o.EndLoc; 392 switch (Kind) { 393 case k_CondCode: 394 CC = o.CC; 395 break; 396 case k_ITCondMask: 397 ITMask = o.ITMask; 398 break; 399 case k_Token: 400 Tok = o.Tok; 401 break; 402 case k_CCOut: 403 case k_Register: 404 Reg = o.Reg; 405 break; 406 case k_RegisterList: 407 case k_DPRRegisterList: 408 case k_SPRRegisterList: 409 Registers = o.Registers; 410 break; 411 
case k_VectorList: 412 VectorList = o.VectorList; 413 break; 414 case k_CoprocNum: 415 case k_CoprocReg: 416 Cop = o.Cop; 417 break; 418 case k_CoprocOption: 419 CoprocOption = o.CoprocOption; 420 break; 421 case k_Immediate: 422 Imm = o.Imm; 423 break; 424 case k_FPImmediate: 425 FPImm = o.FPImm; 426 break; 427 case k_MemBarrierOpt: 428 MBOpt = o.MBOpt; 429 break; 430 case k_Memory: 431 Memory = o.Memory; 432 break; 433 case k_PostIndexRegister: 434 PostIdxReg = o.PostIdxReg; 435 break; 436 case k_MSRMask: 437 MMask = o.MMask; 438 break; 439 case k_ProcIFlags: 440 IFlags = o.IFlags; 441 break; 442 case k_ShifterImmediate: 443 ShifterImm = o.ShifterImm; 444 break; 445 case k_ShiftedRegister: 446 RegShiftedReg = o.RegShiftedReg; 447 break; 448 case k_ShiftedImmediate: 449 RegShiftedImm = o.RegShiftedImm; 450 break; 451 case k_RotateImmediate: 452 RotImm = o.RotImm; 453 break; 454 case k_BitfieldDescriptor: 455 Bitfield = o.Bitfield; 456 break; 457 case k_VectorIndex: 458 VectorIndex = o.VectorIndex; 459 break; 460 } 461 } 462 463 /// getStartLoc - Get the location of the first token of this operand. 464 SMLoc getStartLoc() const { return StartLoc; } 465 /// getEndLoc - Get the location of the last token of this operand. 
466 SMLoc getEndLoc() const { return EndLoc; } 467 468 ARMCC::CondCodes getCondCode() const { 469 assert(Kind == k_CondCode && "Invalid access!"); 470 return CC.Val; 471 } 472 473 unsigned getCoproc() const { 474 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 475 return Cop.Val; 476 } 477 478 StringRef getToken() const { 479 assert(Kind == k_Token && "Invalid access!"); 480 return StringRef(Tok.Data, Tok.Length); 481 } 482 483 unsigned getReg() const { 484 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 485 return Reg.RegNum; 486 } 487 488 const SmallVectorImpl<unsigned> &getRegList() const { 489 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 490 Kind == k_SPRRegisterList) && "Invalid access!"); 491 return Registers; 492 } 493 494 const MCExpr *getImm() const { 495 assert(Kind == k_Immediate && "Invalid access!"); 496 return Imm.Val; 497 } 498 499 unsigned getFPImm() const { 500 assert(Kind == k_FPImmediate && "Invalid access!"); 501 return FPImm.Val; 502 } 503 504 unsigned getVectorIndex() const { 505 assert(Kind == k_VectorIndex && "Invalid access!"); 506 return VectorIndex.Val; 507 } 508 509 ARM_MB::MemBOpt getMemBarrierOpt() const { 510 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 511 return MBOpt.Val; 512 } 513 514 ARM_PROC::IFlags getProcIFlags() const { 515 assert(Kind == k_ProcIFlags && "Invalid access!"); 516 return IFlags.Val; 517 } 518 519 unsigned getMSRMask() const { 520 assert(Kind == k_MSRMask && "Invalid access!"); 521 return MMask.Val; 522 } 523 524 bool isCoprocNum() const { return Kind == k_CoprocNum; } 525 bool isCoprocReg() const { return Kind == k_CoprocReg; } 526 bool isCoprocOption() const { return Kind == k_CoprocOption; } 527 bool isCondCode() const { return Kind == k_CondCode; } 528 bool isCCOut() const { return Kind == k_CCOut; } 529 bool isITMask() const { return Kind == k_ITCondMask; } 530 bool isITCondCode() const { return Kind == k_CondCode; } 531 bool isImm() 
const { return Kind == k_Immediate; } 532 bool isFPImm() const { return Kind == k_FPImmediate; } 533 bool isImm8s4() const { 534 if (Kind != k_Immediate) 535 return false; 536 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 537 if (!CE) return false; 538 int64_t Value = CE->getValue(); 539 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 540 } 541 bool isImm0_1020s4() const { 542 if (Kind != k_Immediate) 543 return false; 544 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 545 if (!CE) return false; 546 int64_t Value = CE->getValue(); 547 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 548 } 549 bool isImm0_508s4() const { 550 if (Kind != k_Immediate) 551 return false; 552 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 553 if (!CE) return false; 554 int64_t Value = CE->getValue(); 555 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 556 } 557 bool isImm0_255() const { 558 if (Kind != k_Immediate) 559 return false; 560 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 561 if (!CE) return false; 562 int64_t Value = CE->getValue(); 563 return Value >= 0 && Value < 256; 564 } 565 bool isImm0_7() const { 566 if (Kind != k_Immediate) 567 return false; 568 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 569 if (!CE) return false; 570 int64_t Value = CE->getValue(); 571 return Value >= 0 && Value < 8; 572 } 573 bool isImm0_15() const { 574 if (Kind != k_Immediate) 575 return false; 576 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 577 if (!CE) return false; 578 int64_t Value = CE->getValue(); 579 return Value >= 0 && Value < 16; 580 } 581 bool isImm0_31() const { 582 if (Kind != k_Immediate) 583 return false; 584 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 585 if (!CE) return false; 586 int64_t Value = CE->getValue(); 587 return Value >= 0 && Value < 32; 588 } 589 bool isImm1_16() const { 590 if (Kind != k_Immediate) 591 return false; 
592 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 593 if (!CE) return false; 594 int64_t Value = CE->getValue(); 595 return Value > 0 && Value < 17; 596 } 597 bool isImm1_32() const { 598 if (Kind != k_Immediate) 599 return false; 600 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 601 if (!CE) return false; 602 int64_t Value = CE->getValue(); 603 return Value > 0 && Value < 33; 604 } 605 bool isImm0_32() const { 606 if (Kind != k_Immediate) 607 return false; 608 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 609 if (!CE) return false; 610 int64_t Value = CE->getValue(); 611 return Value >= 0 && Value < 33; 612 } 613 bool isImm0_65535() const { 614 if (Kind != k_Immediate) 615 return false; 616 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 617 if (!CE) return false; 618 int64_t Value = CE->getValue(); 619 return Value >= 0 && Value < 65536; 620 } 621 bool isImm0_65535Expr() const { 622 if (Kind != k_Immediate) 623 return false; 624 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 625 // If it's not a constant expression, it'll generate a fixup and be 626 // handled later. 
627 if (!CE) return true; 628 int64_t Value = CE->getValue(); 629 return Value >= 0 && Value < 65536; 630 } 631 bool isImm24bit() const { 632 if (Kind != k_Immediate) 633 return false; 634 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 635 if (!CE) return false; 636 int64_t Value = CE->getValue(); 637 return Value >= 0 && Value <= 0xffffff; 638 } 639 bool isImmThumbSR() const { 640 if (Kind != k_Immediate) 641 return false; 642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 643 if (!CE) return false; 644 int64_t Value = CE->getValue(); 645 return Value > 0 && Value < 33; 646 } 647 bool isPKHLSLImm() const { 648 if (Kind != k_Immediate) 649 return false; 650 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 651 if (!CE) return false; 652 int64_t Value = CE->getValue(); 653 return Value >= 0 && Value < 32; 654 } 655 bool isPKHASRImm() const { 656 if (Kind != k_Immediate) 657 return false; 658 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 659 if (!CE) return false; 660 int64_t Value = CE->getValue(); 661 return Value > 0 && Value <= 32; 662 } 663 bool isARMSOImm() const { 664 if (Kind != k_Immediate) 665 return false; 666 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 667 if (!CE) return false; 668 int64_t Value = CE->getValue(); 669 return ARM_AM::getSOImmVal(Value) != -1; 670 } 671 bool isARMSOImmNot() const { 672 if (Kind != k_Immediate) 673 return false; 674 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 675 if (!CE) return false; 676 int64_t Value = CE->getValue(); 677 return ARM_AM::getSOImmVal(~Value) != -1; 678 } 679 bool isT2SOImm() const { 680 if (Kind != k_Immediate) 681 return false; 682 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 683 if (!CE) return false; 684 int64_t Value = CE->getValue(); 685 return ARM_AM::getT2SOImmVal(Value) != -1; 686 } 687 bool isT2SOImmNot() const { 688 if (Kind != k_Immediate) 689 return false; 690 const 
MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 691 if (!CE) return false; 692 int64_t Value = CE->getValue(); 693 return ARM_AM::getT2SOImmVal(~Value) != -1; 694 } 695 bool isSetEndImm() const { 696 if (Kind != k_Immediate) 697 return false; 698 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 699 if (!CE) return false; 700 int64_t Value = CE->getValue(); 701 return Value == 1 || Value == 0; 702 } 703 bool isReg() const { return Kind == k_Register; } 704 bool isRegList() const { return Kind == k_RegisterList; } 705 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 706 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 707 bool isToken() const { return Kind == k_Token; } 708 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 709 bool isMemory() const { return Kind == k_Memory; } 710 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 711 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 712 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 713 bool isRotImm() const { return Kind == k_RotateImmediate; } 714 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 715 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 716 bool isPostIdxReg() const { 717 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift; 718 } 719 bool isMemNoOffset(bool alignOK = false) const { 720 if (!isMemory()) 721 return false; 722 // No offset of any kind. 723 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 724 (alignOK || Memory.Alignment == 0); 725 } 726 bool isAlignedMemory() const { 727 return isMemNoOffset(true); 728 } 729 bool isAddrMode2() const { 730 if (!isMemory() || Memory.Alignment != 0) return false; 731 // Check for register offset. 732 if (Memory.OffsetRegNum) return true; 733 // Immediate offset in range [-4095, 4095]. 
734 if (!Memory.OffsetImm) return true; 735 int64_t Val = Memory.OffsetImm->getValue(); 736 return Val > -4096 && Val < 4096; 737 } 738 bool isAM2OffsetImm() const { 739 if (Kind != k_Immediate) 740 return false; 741 // Immediate offset in range [-4095, 4095]. 742 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 743 if (!CE) return false; 744 int64_t Val = CE->getValue(); 745 return Val > -4096 && Val < 4096; 746 } 747 bool isAddrMode3() const { 748 if (!isMemory() || Memory.Alignment != 0) return false; 749 // No shifts are legal for AM3. 750 if (Memory.ShiftType != ARM_AM::no_shift) return false; 751 // Check for register offset. 752 if (Memory.OffsetRegNum) return true; 753 // Immediate offset in range [-255, 255]. 754 if (!Memory.OffsetImm) return true; 755 int64_t Val = Memory.OffsetImm->getValue(); 756 return Val > -256 && Val < 256; 757 } 758 bool isAM3Offset() const { 759 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 760 return false; 761 if (Kind == k_PostIndexRegister) 762 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 763 // Immediate offset in range [-255, 255]. 764 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 765 if (!CE) return false; 766 int64_t Val = CE->getValue(); 767 // Special case, #-0 is INT32_MIN. 768 return (Val > -256 && Val < 256) || Val == INT32_MIN; 769 } 770 bool isAddrMode5() const { 771 // If we have an immediate that's not a constant, treat it as a label 772 // reference needing a fixup. If it is a constant, it's something else 773 // and we reject it. 774 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 775 return true; 776 if (!isMemory() || Memory.Alignment != 0) return false; 777 // Check for register offset. 778 if (Memory.OffsetRegNum) return false; 779 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
780 if (!Memory.OffsetImm) return true; 781 int64_t Val = Memory.OffsetImm->getValue(); 782 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 783 Val == INT32_MIN; 784 } 785 bool isMemTBB() const { 786 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 787 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 788 return false; 789 return true; 790 } 791 bool isMemTBH() const { 792 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 793 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 794 Memory.Alignment != 0 ) 795 return false; 796 return true; 797 } 798 bool isMemRegOffset() const { 799 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 800 return false; 801 return true; 802 } 803 bool isT2MemRegOffset() const { 804 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 805 Memory.Alignment != 0) 806 return false; 807 // Only lsl #{0, 1, 2, 3} allowed. 808 if (Memory.ShiftType == ARM_AM::no_shift) 809 return true; 810 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 811 return false; 812 return true; 813 } 814 bool isMemThumbRR() const { 815 // Thumb reg+reg addressing is simple. Just two registers, a base and 816 // an offset. No shifts, negations or any other complicating factors. 817 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 818 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 819 return false; 820 return isARMLowRegister(Memory.BaseRegNum) && 821 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 822 } 823 bool isMemThumbRIs4() const { 824 if (!isMemory() || Memory.OffsetRegNum != 0 || 825 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 826 return false; 827 // Immediate offset, multiple of 4 in range [0, 124]. 
828 if (!Memory.OffsetImm) return true; 829 int64_t Val = Memory.OffsetImm->getValue(); 830 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 831 } 832 bool isMemThumbRIs2() const { 833 if (!isMemory() || Memory.OffsetRegNum != 0 || 834 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 835 return false; 836 // Immediate offset, multiple of 4 in range [0, 62]. 837 if (!Memory.OffsetImm) return true; 838 int64_t Val = Memory.OffsetImm->getValue(); 839 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 840 } 841 bool isMemThumbRIs1() const { 842 if (!isMemory() || Memory.OffsetRegNum != 0 || 843 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 844 return false; 845 // Immediate offset in range [0, 31]. 846 if (!Memory.OffsetImm) return true; 847 int64_t Val = Memory.OffsetImm->getValue(); 848 return Val >= 0 && Val <= 31; 849 } 850 bool isMemThumbSPI() const { 851 if (!isMemory() || Memory.OffsetRegNum != 0 || 852 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 853 return false; 854 // Immediate offset, multiple of 4 in range [0, 1020]. 855 if (!Memory.OffsetImm) return true; 856 int64_t Val = Memory.OffsetImm->getValue(); 857 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 858 } 859 bool isMemImm8s4Offset() const { 860 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 861 return false; 862 // Immediate offset a multiple of 4 in range [-1020, 1020]. 863 if (!Memory.OffsetImm) return true; 864 int64_t Val = Memory.OffsetImm->getValue(); 865 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 866 } 867 bool isMemImm0_1020s4Offset() const { 868 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 869 return false; 870 // Immediate offset a multiple of 4 in range [0, 1020]. 
871 if (!Memory.OffsetImm) return true; 872 int64_t Val = Memory.OffsetImm->getValue(); 873 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 874 } 875 bool isMemImm8Offset() const { 876 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 877 return false; 878 // Immediate offset in range [-255, 255]. 879 if (!Memory.OffsetImm) return true; 880 int64_t Val = Memory.OffsetImm->getValue(); 881 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 882 } 883 bool isMemPosImm8Offset() const { 884 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 885 return false; 886 // Immediate offset in range [0, 255]. 887 if (!Memory.OffsetImm) return true; 888 int64_t Val = Memory.OffsetImm->getValue(); 889 return Val >= 0 && Val < 256; 890 } 891 bool isMemNegImm8Offset() const { 892 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 893 return false; 894 // Immediate offset in range [-255, -1]. 895 if (!Memory.OffsetImm) return true; 896 int64_t Val = Memory.OffsetImm->getValue(); 897 return Val > -256 && Val < 0; 898 } 899 bool isMemUImm12Offset() const { 900 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 901 return false; 902 // Immediate offset in range [0, 4095]. 903 if (!Memory.OffsetImm) return true; 904 int64_t Val = Memory.OffsetImm->getValue(); 905 return (Val >= 0 && Val < 4096); 906 } 907 bool isMemImm12Offset() const { 908 // If we have an immediate that's not a constant, treat it as a label 909 // reference needing a fixup. If it is a constant, it's something else 910 // and we reject it. 911 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 912 return true; 913 914 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 915 return false; 916 // Immediate offset in range [-4095, 4095]. 
917 if (!Memory.OffsetImm) return true; 918 int64_t Val = Memory.OffsetImm->getValue(); 919 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 920 } 921 bool isPostIdxImm8() const { 922 if (Kind != k_Immediate) 923 return false; 924 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 925 if (!CE) return false; 926 int64_t Val = CE->getValue(); 927 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 928 } 929 bool isPostIdxImm8s4() const { 930 if (Kind != k_Immediate) 931 return false; 932 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 933 if (!CE) return false; 934 int64_t Val = CE->getValue(); 935 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 936 (Val == INT32_MIN); 937 } 938 939 bool isMSRMask() const { return Kind == k_MSRMask; } 940 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 941 942 // NEON operands. 943 bool isVecListOneD() const { 944 if (Kind != k_VectorList) return false; 945 return VectorList.Count == 1; 946 } 947 948 bool isVecListTwoD() const { 949 if (Kind != k_VectorList) return false; 950 return VectorList.Count == 2; 951 } 952 953 bool isVecListThreeD() const { 954 if (Kind != k_VectorList) return false; 955 return VectorList.Count == 3; 956 } 957 958 bool isVecListFourD() const { 959 if (Kind != k_VectorList) return false; 960 return VectorList.Count == 4; 961 } 962 963 bool isVecListTwoQ() const { 964 if (Kind != k_VectorList) return false; 965 //FIXME: We haven't taught the parser to handle by-two register lists 966 // yet, so don't pretend to know one. 
967 return VectorList.Count == 2 && false; 968 } 969 970 bool isVectorIndex8() const { 971 if (Kind != k_VectorIndex) return false; 972 return VectorIndex.Val < 8; 973 } 974 bool isVectorIndex16() const { 975 if (Kind != k_VectorIndex) return false; 976 return VectorIndex.Val < 4; 977 } 978 bool isVectorIndex32() const { 979 if (Kind != k_VectorIndex) return false; 980 return VectorIndex.Val < 2; 981 } 982 983 bool isNEONi8splat() const { 984 if (Kind != k_Immediate) 985 return false; 986 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 987 // Must be a constant. 988 if (!CE) return false; 989 int64_t Value = CE->getValue(); 990 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 991 // value. 992 return Value >= 0 && Value < 256; 993 } 994 995 bool isNEONi16splat() const { 996 if (Kind != k_Immediate) 997 return false; 998 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 999 // Must be a constant. 1000 if (!CE) return false; 1001 int64_t Value = CE->getValue(); 1002 // i16 value in the range [0,255] or [0x0100, 0xff00] 1003 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1004 } 1005 1006 bool isNEONi32splat() const { 1007 if (Kind != k_Immediate) 1008 return false; 1009 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1010 // Must be a constant. 1011 if (!CE) return false; 1012 int64_t Value = CE->getValue(); 1013 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 1014 return (Value >= 0 && Value < 256) || 1015 (Value >= 0x0100 && Value <= 0xff00) || 1016 (Value >= 0x010000 && Value <= 0xff0000) || 1017 (Value >= 0x01000000 && Value <= 0xff000000); 1018 } 1019 1020 bool isNEONi32vmov() const { 1021 if (Kind != k_Immediate) 1022 return false; 1023 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1024 // Must be a constant. 
1025 if (!CE) return false; 1026 int64_t Value = CE->getValue(); 1027 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1028 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1029 return (Value >= 0 && Value < 256) || 1030 (Value >= 0x0100 && Value <= 0xff00) || 1031 (Value >= 0x010000 && Value <= 0xff0000) || 1032 (Value >= 0x01000000 && Value <= 0xff000000) || 1033 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1034 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1035 } 1036 1037 bool isNEONi64splat() const { 1038 if (Kind != k_Immediate) 1039 return false; 1040 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1041 // Must be a constant. 1042 if (!CE) return false; 1043 uint64_t Value = CE->getValue(); 1044 // i64 value with each byte being either 0 or 0xff. 1045 for (unsigned i = 0; i < 8; ++i) 1046 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1047 return true; 1048 } 1049 1050 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1051 // Add as immediates when possible. Null MCExpr = 0. 1052 if (Expr == 0) 1053 Inst.addOperand(MCOperand::CreateImm(0)); 1054 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1055 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1056 else 1057 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1058 } 1059 1060 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1061 assert(N == 2 && "Invalid number of operands!"); 1062 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1063 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 1064 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1065 } 1066 1067 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1068 assert(N == 1 && "Invalid number of operands!"); 1069 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1070 } 1071 1072 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1073 assert(N == 1 && "Invalid number of operands!"); 1074 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1075 } 1076 1077 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1078 assert(N == 1 && "Invalid number of operands!"); 1079 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1080 } 1081 1082 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1083 assert(N == 1 && "Invalid number of operands!"); 1084 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1085 } 1086 1087 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1088 assert(N == 1 && "Invalid number of operands!"); 1089 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1090 } 1091 1092 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1093 assert(N == 1 && "Invalid number of operands!"); 1094 Inst.addOperand(MCOperand::CreateReg(getReg())); 1095 } 1096 1097 void addRegOperands(MCInst &Inst, unsigned N) const { 1098 assert(N == 1 && "Invalid number of operands!"); 1099 Inst.addOperand(MCOperand::CreateReg(getReg())); 1100 } 1101 1102 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1103 assert(N == 3 && "Invalid number of operands!"); 1104 assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!"); 1105 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1106 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1107 Inst.addOperand(MCOperand::CreateImm( 1108 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1109 } 1110 1111 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1112 assert(N == 2 && "Invalid number of operands!"); 1113 
assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!"); 1114 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1115 Inst.addOperand(MCOperand::CreateImm( 1116 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1117 } 1118 1119 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1120 assert(N == 1 && "Invalid number of operands!"); 1121 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1122 ShifterImm.Imm)); 1123 } 1124 1125 void addRegListOperands(MCInst &Inst, unsigned N) const { 1126 assert(N == 1 && "Invalid number of operands!"); 1127 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1128 for (SmallVectorImpl<unsigned>::const_iterator 1129 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1130 Inst.addOperand(MCOperand::CreateReg(*I)); 1131 } 1132 1133 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1134 addRegListOperands(Inst, N); 1135 } 1136 1137 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1138 addRegListOperands(Inst, N); 1139 } 1140 1141 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1142 assert(N == 1 && "Invalid number of operands!"); 1143 // Encoded as val>>3. The printer handles display as 8, 16, 24. 1144 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1145 } 1146 1147 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1148 assert(N == 1 && "Invalid number of operands!"); 1149 // Munge the lsb/width into a bitfield mask. 1150 unsigned lsb = Bitfield.LSB; 1151 unsigned width = Bitfield.Width; 1152 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 
1153 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1154 (32 - (lsb + width))); 1155 Inst.addOperand(MCOperand::CreateImm(Mask)); 1156 } 1157 1158 void addImmOperands(MCInst &Inst, unsigned N) const { 1159 assert(N == 1 && "Invalid number of operands!"); 1160 addExpr(Inst, getImm()); 1161 } 1162 1163 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1164 assert(N == 1 && "Invalid number of operands!"); 1165 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1166 } 1167 1168 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1169 assert(N == 1 && "Invalid number of operands!"); 1170 // FIXME: We really want to scale the value here, but the LDRD/STRD 1171 // instruction don't encode operands that way yet. 1172 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1173 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1174 } 1175 1176 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1177 assert(N == 1 && "Invalid number of operands!"); 1178 // The immediate is scaled by four in the encoding and is stored 1179 // in the MCInst as such. Lop off the low two bits here. 1180 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1181 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1182 } 1183 1184 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1185 assert(N == 1 && "Invalid number of operands!"); 1186 // The immediate is scaled by four in the encoding and is stored 1187 // in the MCInst as such. Lop off the low two bits here. 
1188 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1189 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1190 } 1191 1192 void addImm0_255Operands(MCInst &Inst, unsigned N) const { 1193 assert(N == 1 && "Invalid number of operands!"); 1194 addExpr(Inst, getImm()); 1195 } 1196 1197 void addImm0_7Operands(MCInst &Inst, unsigned N) const { 1198 assert(N == 1 && "Invalid number of operands!"); 1199 addExpr(Inst, getImm()); 1200 } 1201 1202 void addImm0_15Operands(MCInst &Inst, unsigned N) const { 1203 assert(N == 1 && "Invalid number of operands!"); 1204 addExpr(Inst, getImm()); 1205 } 1206 1207 void addImm0_31Operands(MCInst &Inst, unsigned N) const { 1208 assert(N == 1 && "Invalid number of operands!"); 1209 addExpr(Inst, getImm()); 1210 } 1211 1212 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1213 assert(N == 1 && "Invalid number of operands!"); 1214 // The constant encodes as the immediate-1, and we store in the instruction 1215 // the bits as encoded, so subtract off one here. 1216 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1217 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1218 } 1219 1220 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1221 assert(N == 1 && "Invalid number of operands!"); 1222 // The constant encodes as the immediate-1, and we store in the instruction 1223 // the bits as encoded, so subtract off one here. 
1224 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1225 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1226 } 1227 1228 void addImm0_32Operands(MCInst &Inst, unsigned N) const { 1229 assert(N == 1 && "Invalid number of operands!"); 1230 addExpr(Inst, getImm()); 1231 } 1232 1233 void addImm0_65535Operands(MCInst &Inst, unsigned N) const { 1234 assert(N == 1 && "Invalid number of operands!"); 1235 addExpr(Inst, getImm()); 1236 } 1237 1238 void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const { 1239 assert(N == 1 && "Invalid number of operands!"); 1240 addExpr(Inst, getImm()); 1241 } 1242 1243 void addImm24bitOperands(MCInst &Inst, unsigned N) const { 1244 assert(N == 1 && "Invalid number of operands!"); 1245 addExpr(Inst, getImm()); 1246 } 1247 1248 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1249 assert(N == 1 && "Invalid number of operands!"); 1250 // The constant encodes as the immediate, except for 32, which encodes as 1251 // zero. 1252 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1253 unsigned Imm = CE->getValue(); 1254 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1255 } 1256 1257 void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const { 1258 assert(N == 1 && "Invalid number of operands!"); 1259 addExpr(Inst, getImm()); 1260 } 1261 1262 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1263 assert(N == 1 && "Invalid number of operands!"); 1264 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1265 // the instruction as well. 1266 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1267 int Val = CE->getValue(); 1268 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 
0 : Val)); 1269 } 1270 1271 void addARMSOImmOperands(MCInst &Inst, unsigned N) const { 1272 assert(N == 1 && "Invalid number of operands!"); 1273 addExpr(Inst, getImm()); 1274 } 1275 1276 void addT2SOImmOperands(MCInst &Inst, unsigned N) const { 1277 assert(N == 1 && "Invalid number of operands!"); 1278 addExpr(Inst, getImm()); 1279 } 1280 1281 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 1282 assert(N == 1 && "Invalid number of operands!"); 1283 // The operand is actually a t2_so_imm, but we have its bitwise 1284 // negation in the assembly source, so twiddle it here. 1285 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1286 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1287 } 1288 1289 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 1290 assert(N == 1 && "Invalid number of operands!"); 1291 // The operand is actually a so_imm, but we have its bitwise 1292 // negation in the assembly source, so twiddle it here. 1293 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1294 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1295 } 1296 1297 void addSetEndImmOperands(MCInst &Inst, unsigned N) const { 1298 assert(N == 1 && "Invalid number of operands!"); 1299 addExpr(Inst, getImm()); 1300 } 1301 1302 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1303 assert(N == 1 && "Invalid number of operands!"); 1304 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1305 } 1306 1307 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1308 assert(N == 1 && "Invalid number of operands!"); 1309 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1310 } 1311 1312 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1313 assert(N == 2 && "Invalid number of operands!"); 1314 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1315 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1316 } 1317 1318 void addAddrMode2Operands(MCInst &Inst, 
unsigned N) const { 1319 assert(N == 3 && "Invalid number of operands!"); 1320 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1321 if (!Memory.OffsetRegNum) { 1322 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1323 // Special case for #-0 1324 if (Val == INT32_MIN) Val = 0; 1325 if (Val < 0) Val = -Val; 1326 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1327 } else { 1328 // For register offset, we encode the shift type and negation flag 1329 // here. 1330 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1331 Memory.ShiftImm, Memory.ShiftType); 1332 } 1333 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1334 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1335 Inst.addOperand(MCOperand::CreateImm(Val)); 1336 } 1337 1338 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1339 assert(N == 2 && "Invalid number of operands!"); 1340 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1341 assert(CE && "non-constant AM2OffsetImm operand!"); 1342 int32_t Val = CE->getValue(); 1343 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1344 // Special case for #-0 1345 if (Val == INT32_MIN) Val = 0; 1346 if (Val < 0) Val = -Val; 1347 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1348 Inst.addOperand(MCOperand::CreateReg(0)); 1349 Inst.addOperand(MCOperand::CreateImm(Val)); 1350 } 1351 1352 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1353 assert(N == 3 && "Invalid number of operands!"); 1354 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1355 if (!Memory.OffsetRegNum) { 1356 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1357 // Special case for #-0 1358 if (Val == INT32_MIN) Val = 0; 1359 if (Val < 0) Val = -Val; 1360 Val = ARM_AM::getAM3Opc(AddSub, Val); 1361 } else { 1362 // For register offset, we encode the shift type and negation flag 1363 // here. 
1364 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1365 } 1366 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1367 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1368 Inst.addOperand(MCOperand::CreateImm(Val)); 1369 } 1370 1371 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1372 assert(N == 2 && "Invalid number of operands!"); 1373 if (Kind == k_PostIndexRegister) { 1374 int32_t Val = 1375 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 1376 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1377 Inst.addOperand(MCOperand::CreateImm(Val)); 1378 return; 1379 } 1380 1381 // Constant offset. 1382 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1383 int32_t Val = CE->getValue(); 1384 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1385 // Special case for #-0 1386 if (Val == INT32_MIN) Val = 0; 1387 if (Val < 0) Val = -Val; 1388 Val = ARM_AM::getAM3Opc(AddSub, Val); 1389 Inst.addOperand(MCOperand::CreateReg(0)); 1390 Inst.addOperand(MCOperand::CreateImm(Val)); 1391 } 1392 1393 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1394 assert(N == 2 && "Invalid number of operands!"); 1395 // If we have an immediate that's not a constant, treat it as a label 1396 // reference needing a fixup. If it is a constant, it's something else 1397 // and we reject it. 1398 if (isImm()) { 1399 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1400 Inst.addOperand(MCOperand::CreateImm(0)); 1401 return; 1402 } 1403 1404 // The lower two bits are always zero and as such are not encoded. 1405 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1406 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1407 // Special case for #-0 1408 if (Val == INT32_MIN) Val = 0; 1409 if (Val < 0) Val = -Val; 1410 Val = ARM_AM::getAM5Opc(AddSub, Val); 1411 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1412 Inst.addOperand(MCOperand::CreateImm(Val)); 1413 } 1414 1415 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1416 assert(N == 2 && "Invalid number of operands!"); 1417 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1418 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1419 Inst.addOperand(MCOperand::CreateImm(Val)); 1420 } 1421 1422 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1423 assert(N == 2 && "Invalid number of operands!"); 1424 // The lower two bits are always zero and as such are not encoded. 1425 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1426 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1427 Inst.addOperand(MCOperand::CreateImm(Val)); 1428 } 1429 1430 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1431 assert(N == 2 && "Invalid number of operands!"); 1432 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1433 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1434 Inst.addOperand(MCOperand::CreateImm(Val)); 1435 } 1436 1437 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1438 addMemImm8OffsetOperands(Inst, N); 1439 } 1440 1441 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1442 addMemImm8OffsetOperands(Inst, N); 1443 } 1444 1445 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1446 assert(N == 2 && "Invalid number of operands!"); 1447 // If this is an immediate, it's a label reference. 1448 if (Kind == k_Immediate) { 1449 addExpr(Inst, getImm()); 1450 Inst.addOperand(MCOperand::CreateImm(0)); 1451 return; 1452 } 1453 1454 // Otherwise, it's a normal memory reg+offset. 1455 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1456 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1457 Inst.addOperand(MCOperand::CreateImm(Val)); 1458 } 1459 1460 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1461 assert(N == 2 && "Invalid number of operands!"); 1462 // If this is an immediate, it's a label reference. 1463 if (Kind == k_Immediate) { 1464 addExpr(Inst, getImm()); 1465 Inst.addOperand(MCOperand::CreateImm(0)); 1466 return; 1467 } 1468 1469 // Otherwise, it's a normal memory reg+offset. 1470 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1471 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1472 Inst.addOperand(MCOperand::CreateImm(Val)); 1473 } 1474 1475 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1476 assert(N == 2 && "Invalid number of operands!"); 1477 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1478 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1479 } 1480 1481 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1482 assert(N == 2 && "Invalid number of operands!"); 1483 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1484 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1485 } 1486 1487 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1488 assert(N == 3 && "Invalid number of operands!"); 1489 unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1490 Memory.ShiftImm, Memory.ShiftType); 1491 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1492 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1493 Inst.addOperand(MCOperand::CreateImm(Val)); 1494 } 1495 1496 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1497 assert(N == 3 && "Invalid number of operands!"); 1498 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1499 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1500 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1501 } 1502 1503 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1504 assert(N == 2 && "Invalid number of operands!"); 1505 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1506 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1507 } 1508 1509 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1510 assert(N == 2 && "Invalid number of operands!"); 1511 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1512 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1513 Inst.addOperand(MCOperand::CreateImm(Val)); 1514 } 1515 1516 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1517 assert(N == 2 && "Invalid number of operands!"); 1518 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1519 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1520 Inst.addOperand(MCOperand::CreateImm(Val)); 1521 } 1522 1523 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1524 assert(N == 2 && "Invalid number of operands!"); 1525 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1526 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1527 Inst.addOperand(MCOperand::CreateImm(Val)); 1528 } 1529 1530 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1531 assert(N == 2 && "Invalid number of operands!"); 1532 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1533 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1534 Inst.addOperand(MCOperand::CreateImm(Val)); 1535 } 1536 1537 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1538 assert(N == 1 && "Invalid number of operands!"); 1539 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1540 assert(CE && "non-constant post-idx-imm8 operand!"); 1541 int Imm = CE->getValue(); 1542 bool isAdd = Imm >= 0; 1543 if (Imm == INT32_MIN) Imm = 0; 1544 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1545 Inst.addOperand(MCOperand::CreateImm(Imm)); 1546 } 1547 1548 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1549 assert(N == 1 && "Invalid number of operands!"); 1550 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1551 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1552 int Imm = CE->getValue(); 1553 bool isAdd = Imm >= 0; 1554 if (Imm == INT32_MIN) Imm = 0; 1555 // Immediate is scaled by 4. 1556 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8; 1557 Inst.addOperand(MCOperand::CreateImm(Imm)); 1558 } 1559 1560 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1561 assert(N == 2 && "Invalid number of operands!"); 1562 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1563 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1564 } 1565 1566 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1567 assert(N == 2 && "Invalid number of operands!"); 1568 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1569 // The sign, shift type, and shift amount are encoded in a single operand 1570 // using the AM2 encoding helpers. 1571 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? 
ARM_AM::add : ARM_AM::sub; 1572 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1573 PostIdxReg.ShiftTy); 1574 Inst.addOperand(MCOperand::CreateImm(Imm)); 1575 } 1576 1577 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1578 assert(N == 1 && "Invalid number of operands!"); 1579 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1580 } 1581 1582 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1583 assert(N == 1 && "Invalid number of operands!"); 1584 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1585 } 1586 1587 void addVecListOneDOperands(MCInst &Inst, unsigned N) const { 1588 assert(N == 1 && "Invalid number of operands!"); 1589 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1590 } 1591 1592 void addVecListTwoDOperands(MCInst &Inst, unsigned N) const { 1593 assert(N == 1 && "Invalid number of operands!"); 1594 // Only the first register actually goes on the instruction. The rest 1595 // are implied by the opcode. 1596 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1597 } 1598 1599 void addVecListThreeDOperands(MCInst &Inst, unsigned N) const { 1600 assert(N == 1 && "Invalid number of operands!"); 1601 // Only the first register actually goes on the instruction. The rest 1602 // are implied by the opcode. 1603 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1604 } 1605 1606 void addVecListFourDOperands(MCInst &Inst, unsigned N) const { 1607 assert(N == 1 && "Invalid number of operands!"); 1608 // Only the first register actually goes on the instruction. The rest 1609 // are implied by the opcode. 1610 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1611 } 1612 1613 void addVecListTwoQOperands(MCInst &Inst, unsigned N) const { 1614 assert(N == 1 && "Invalid number of operands!"); 1615 // Only the first register actually goes on the instruction. The rest 1616 // are implied by the opcode. 
1617 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1618 } 1619 1620 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1621 assert(N == 1 && "Invalid number of operands!"); 1622 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1623 } 1624 1625 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1626 assert(N == 1 && "Invalid number of operands!"); 1627 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1628 } 1629 1630 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1631 assert(N == 1 && "Invalid number of operands!"); 1632 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1633 } 1634 1635 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1636 assert(N == 1 && "Invalid number of operands!"); 1637 // The immediate encodes the type of constant as well as the value. 1638 // Mask in that this is an i8 splat. 1639 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1640 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1641 } 1642 1643 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1644 assert(N == 1 && "Invalid number of operands!"); 1645 // The immediate encodes the type of constant as well as the value. 1646 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1647 unsigned Value = CE->getValue(); 1648 if (Value >= 256) 1649 Value = (Value >> 8) | 0xa00; 1650 else 1651 Value |= 0x800; 1652 Inst.addOperand(MCOperand::CreateImm(Value)); 1653 } 1654 1655 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1656 assert(N == 1 && "Invalid number of operands!"); 1657 // The immediate encodes the type of constant as well as the value. 
1658 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1659 unsigned Value = CE->getValue(); 1660 if (Value >= 256 && Value <= 0xff00) 1661 Value = (Value >> 8) | 0x200; 1662 else if (Value > 0xffff && Value <= 0xff0000) 1663 Value = (Value >> 16) | 0x400; 1664 else if (Value > 0xffffff) 1665 Value = (Value >> 24) | 0x600; 1666 Inst.addOperand(MCOperand::CreateImm(Value)); 1667 } 1668 1669 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1670 assert(N == 1 && "Invalid number of operands!"); 1671 // The immediate encodes the type of constant as well as the value. 1672 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1673 unsigned Value = CE->getValue(); 1674 if (Value >= 256 && Value <= 0xffff) 1675 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 1676 else if (Value > 0xffff && Value <= 0xffffff) 1677 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1678 else if (Value > 0xffffff) 1679 Value = (Value >> 24) | 0x600; 1680 Inst.addOperand(MCOperand::CreateImm(Value)); 1681 } 1682 1683 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 1684 assert(N == 1 && "Invalid number of operands!"); 1685 // The immediate encodes the type of constant as well as the value. 
    // Tail of an addOperands-style NEON modified-immediate encoder; the
    // enclosing method's signature begins before this chunk. Each byte of
    // the 64-bit splat value contributes one bit of the 8-bit immediate,
    // and 0x1e00 ORs in the cmode bits for the i64 splat encoding.
    // NOTE(review): CE is used without a null check — presumably the
    // operand was already validated as a constant expression; confirm.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      // Bit i of the immediate is the low bit of byte i of the value.
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  // Debug dump of this operand; defined out-of-line after the class.
  virtual void print(raw_ostream &OS) const;

  // --- Factory methods ---------------------------------------------------
  // Each Create* allocates a new ARMOperand of the corresponding kind,
  // fills in the kind-specific payload, and records the source range.
  // Ownership of the returned object passes to the caller (typically the
  // Operands vector, which the matcher later deletes).

  // IT-block condition mask (the 't'/'e' suffix pattern of an IT insn).
  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Predication condition code (eq, ne, ...).
  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Coprocessor number operand ("p0".."p15").
  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Coprocessor register operand ("c0".."c15").
  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Coprocessor option operand ("{imm}" on LDC/STC).
  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Optional flag-setting register operand (CPSR for 's' suffix, or 0).
  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Literal token operand. Note: stores the StringRef's pointer/length,
  // so the underlying buffer must outlive the operand.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Plain register operand.
  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Register shifted by a register (e.g. "r0, lsl r1").
  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Register shifted by an immediate (e.g. "r0, lsl #2").
  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Shifter immediate for SSAT/USAT-style operands (lsl or asr + amount).
  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Rotate amount for SXTB/UXTB-family operands (stored divided by 8).
  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Bitfield descriptor for BFI/BFC (lsb + width).
  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Register list operand. The kind is refined to a DPR or SPR list based
  // on the first register's class; the copied list is sorted so later
  // checks can rely on ascending order.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  // NEON vector list: Count D registers starting at RegNum.
  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // NEON lane index ("d0[2]"). NOTE(review): Ctx is currently unused.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // General immediate operand (arbitrary MCExpr).
  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // VFP immediate; Val is the already-encoded 8-bit VFP constant.
  // NOTE(review): Ctx is currently unused.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Memory operand: base register plus optional immediate or (possibly
  // shifted) register offset, alignment specifier, and sign of the offset.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Post-indexed register offset (with optional shift) for post-inc
  // addressing modes.
  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // DMB/DSB memory barrier option (sy, ish, ...).
  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // CPS interrupt-flags operand (a/i/f bits).
  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // MSR mask operand (spec-reg + field mask, already encoded).
  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

// Dump a human-readable description of this operand for debugging.
// The output format is ad hoc and intended for dev dumps only.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Index 0 ("()") is unreachable for a well-formed IT mask, which always
    // has a terminating 1 bit; entries cover masks 1..14 ("eee").
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset/shift/alignment omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): no opening '<' to match the closing '>' below.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print set flags from highest bit (A) down to lowest (F).
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    // ShiftImm packs both the shift opcode and amount (so_reg encoding).
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm))
       << ", " << RegShiftedReg.ShiftReg << ", "
       << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm)
       << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm))
       << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm)
       << ">";
    break;
  case k_RotateImmediate:
    // Stored rotate is in units of 8 bits; print the real amount.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      // Comma-separate all but the last element.
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

// Target hook: parse a register and report success/failure.
// NOTE(review): StartLoc/EndLoc are never assigned here — callers relying
// on them get uninitialized locations; confirm whether that is intended.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Accept the alternate spellings the auto-generated matcher doesn't know.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
2100int ARMAsmParser::tryParseShiftRegister( 2101 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2102 SMLoc S = Parser.getTok().getLoc(); 2103 const AsmToken &Tok = Parser.getTok(); 2104 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2105 2106 std::string lowerCase = Tok.getString().lower(); 2107 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 2108 .Case("lsl", ARM_AM::lsl) 2109 .Case("lsr", ARM_AM::lsr) 2110 .Case("asr", ARM_AM::asr) 2111 .Case("ror", ARM_AM::ror) 2112 .Case("rrx", ARM_AM::rrx) 2113 .Default(ARM_AM::no_shift); 2114 2115 if (ShiftTy == ARM_AM::no_shift) 2116 return 1; 2117 2118 Parser.Lex(); // Eat the operator. 2119 2120 // The source register for the shift has already been added to the 2121 // operand list, so we need to pop it off and combine it into the shifted 2122 // register operand instead. 2123 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val()); 2124 if (!PrevOp->isReg()) 2125 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 2126 int SrcReg = PrevOp->getReg(); 2127 int64_t Imm = 0; 2128 int ShiftReg = 0; 2129 if (ShiftTy == ARM_AM::rrx) { 2130 // RRX Doesn't have an explicit shift amount. The encoder expects 2131 // the shift register to be the same as the source register. Seems odd, 2132 // but OK. 2133 ShiftReg = SrcReg; 2134 } else { 2135 // Figure out if this is shifted by a constant or a register (for non-RRX). 2136 if (Parser.getTok().is(AsmToken::Hash)) { 2137 Parser.Lex(); // Eat hash. 2138 SMLoc ImmLoc = Parser.getTok().getLoc(); 2139 const MCExpr *ShiftExpr = 0; 2140 if (getParser().ParseExpression(ShiftExpr)) { 2141 Error(ImmLoc, "invalid immediate shift value"); 2142 return -1; 2143 } 2144 // The expression must be evaluatable as an immediate. 2145 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 2146 if (!CE) { 2147 Error(ImmLoc, "invalid immediate shift value"); 2148 return -1; 2149 } 2150 // Range check the immediate. 
2151 // lsl, ror: 0 <= imm <= 31 2152 // lsr, asr: 0 <= imm <= 32 2153 Imm = CE->getValue(); 2154 if (Imm < 0 || 2155 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 2156 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 2157 Error(ImmLoc, "immediate shift value out of range"); 2158 return -1; 2159 } 2160 } else if (Parser.getTok().is(AsmToken::Identifier)) { 2161 ShiftReg = tryParseRegister(); 2162 SMLoc L = Parser.getTok().getLoc(); 2163 if (ShiftReg == -1) { 2164 Error (L, "expected immediate or register in shift operand"); 2165 return -1; 2166 } 2167 } else { 2168 Error (Parser.getTok().getLoc(), 2169 "expected immediate or register in shift operand"); 2170 return -1; 2171 } 2172 } 2173 2174 if (ShiftReg && ShiftTy != ARM_AM::rrx) 2175 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 2176 ShiftReg, Imm, 2177 S, Parser.getTok().getLoc())); 2178 else 2179 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 2180 S, Parser.getTok().getLoc())); 2181 2182 return 0; 2183} 2184 2185 2186/// Try to parse a register name. The token must be an Identifier when called. 2187/// If it's a register, an AsmOperand is created. Another AsmOperand is created 2188/// if there is a "writeback". 'true' if it's not a register. 2189/// 2190/// TODO this is likely to change to allow different register types and or to 2191/// parse for a specific register type. 
2192bool ARMAsmParser:: 2193tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2194 SMLoc S = Parser.getTok().getLoc(); 2195 int RegNo = tryParseRegister(); 2196 if (RegNo == -1) 2197 return true; 2198 2199 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2200 2201 const AsmToken &ExclaimTok = Parser.getTok(); 2202 if (ExclaimTok.is(AsmToken::Exclaim)) { 2203 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2204 ExclaimTok.getLoc())); 2205 Parser.Lex(); // Eat exclaim token 2206 return false; 2207 } 2208 2209 // Also check for an index operand. This is only legal for vector registers, 2210 // but that'll get caught OK in operand matching, so we don't need to 2211 // explicitly filter everything else out here. 2212 if (Parser.getTok().is(AsmToken::LBrac)) { 2213 SMLoc SIdx = Parser.getTok().getLoc(); 2214 Parser.Lex(); // Eat left bracket token. 2215 2216 const MCExpr *ImmVal; 2217 if (getParser().ParseExpression(ImmVal)) 2218 return MatchOperand_ParseFail; 2219 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2220 if (!MCE) { 2221 TokError("immediate value expected for vector index"); 2222 return MatchOperand_ParseFail; 2223 } 2224 2225 SMLoc E = Parser.getTok().getLoc(); 2226 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2227 Error(E, "']' expected"); 2228 return MatchOperand_ParseFail; 2229 } 2230 2231 Parser.Lex(); // Eat right bracket token. 2232 2233 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2234 SIdx, E, 2235 getContext())); 2236 } 2237 2238 return false; 2239} 2240 2241/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2242/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2243/// "c5", ... 2244static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2245 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2246 // but efficient. 
2247 switch (Name.size()) { 2248 default: break; 2249 case 2: 2250 if (Name[0] != CoprocOp) 2251 return -1; 2252 switch (Name[1]) { 2253 default: return -1; 2254 case '0': return 0; 2255 case '1': return 1; 2256 case '2': return 2; 2257 case '3': return 3; 2258 case '4': return 4; 2259 case '5': return 5; 2260 case '6': return 6; 2261 case '7': return 7; 2262 case '8': return 8; 2263 case '9': return 9; 2264 } 2265 break; 2266 case 3: 2267 if (Name[0] != CoprocOp || Name[1] != '1') 2268 return -1; 2269 switch (Name[2]) { 2270 default: return -1; 2271 case '0': return 10; 2272 case '1': return 11; 2273 case '2': return 12; 2274 case '3': return 13; 2275 case '4': return 14; 2276 case '5': return 15; 2277 } 2278 break; 2279 } 2280 2281 return -1; 2282} 2283 2284/// parseITCondCode - Try to parse a condition code for an IT instruction. 2285ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2286parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2287 SMLoc S = Parser.getTok().getLoc(); 2288 const AsmToken &Tok = Parser.getTok(); 2289 if (!Tok.is(AsmToken::Identifier)) 2290 return MatchOperand_NoMatch; 2291 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2292 .Case("eq", ARMCC::EQ) 2293 .Case("ne", ARMCC::NE) 2294 .Case("hs", ARMCC::HS) 2295 .Case("cs", ARMCC::HS) 2296 .Case("lo", ARMCC::LO) 2297 .Case("cc", ARMCC::LO) 2298 .Case("mi", ARMCC::MI) 2299 .Case("pl", ARMCC::PL) 2300 .Case("vs", ARMCC::VS) 2301 .Case("vc", ARMCC::VC) 2302 .Case("hi", ARMCC::HI) 2303 .Case("ls", ARMCC::LS) 2304 .Case("ge", ARMCC::GE) 2305 .Case("lt", ARMCC::LT) 2306 .Case("gt", ARMCC::GT) 2307 .Case("le", ARMCC::LE) 2308 .Case("al", ARMCC::AL) 2309 .Default(~0U); 2310 if (CC == ~0U) 2311 return MatchOperand_NoMatch; 2312 Parser.Lex(); // Eat the token. 2313 2314 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2315 2316 return MatchOperand_Success; 2317} 2318 2319/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2320/// token must be an Identifier when called, and if it is a coprocessor 2321/// number, the token is eaten and the operand is added to the operand list. 2322ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2323parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2324 SMLoc S = Parser.getTok().getLoc(); 2325 const AsmToken &Tok = Parser.getTok(); 2326 if (Tok.isNot(AsmToken::Identifier)) 2327 return MatchOperand_NoMatch; 2328 2329 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2330 if (Num == -1) 2331 return MatchOperand_NoMatch; 2332 2333 Parser.Lex(); // Eat identifier token. 2334 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2335 return MatchOperand_Success; 2336} 2337 2338/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2339/// token must be an Identifier when called, and if it is a coprocessor 2340/// number, the token is eaten and the operand is added to the operand list. 2341ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2342parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2343 SMLoc S = Parser.getTok().getLoc(); 2344 const AsmToken &Tok = Parser.getTok(); 2345 if (Tok.isNot(AsmToken::Identifier)) 2346 return MatchOperand_NoMatch; 2347 2348 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2349 if (Reg == -1) 2350 return MatchOperand_NoMatch; 2351 2352 Parser.Lex(); // Eat identifier token. 2353 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2354 return MatchOperand_Success; 2355} 2356 2357/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2358/// coproc_option : '{' imm0_255 '}' 2359ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2360parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2361 SMLoc S = Parser.getTok().getLoc(); 2362 2363 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2364 if (Parser.getTok().isNot(AsmToken::LCurly)) 2365 return MatchOperand_NoMatch; 2366 Parser.Lex(); // Eat the '{' 2367 2368 const MCExpr *Expr; 2369 SMLoc Loc = Parser.getTok().getLoc(); 2370 if (getParser().ParseExpression(Expr)) { 2371 Error(Loc, "illegal expression"); 2372 return MatchOperand_ParseFail; 2373 } 2374 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2375 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2376 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2377 return MatchOperand_ParseFail; 2378 } 2379 int Val = CE->getValue(); 2380 2381 // Check for and consume the closing '}' 2382 if (Parser.getTok().isNot(AsmToken::RCurly)) 2383 return MatchOperand_ParseFail; 2384 SMLoc E = Parser.getTok().getLoc(); 2385 Parser.Lex(); // Eat the '}' 2386 2387 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2388 return MatchOperand_Success; 2389} 2390 2391// For register list parsing, we need to map from raw GPR register numbering 2392// to the enumeration values. The enumeration values aren't sorted by 2393// register number due to our using "sp", "lr" and "pc" as canonical names. 2394static unsigned getNextRegister(unsigned Reg) { 2395 // If this is a GPR, we need to do it manually, otherwise we can rely 2396 // on the sort ordering of the enumeration since the other reg-classes 2397 // are sane. 
2398 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2399 return Reg + 1; 2400 switch(Reg) { 2401 default: assert(0 && "Invalid GPR number!"); 2402 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2403 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2404 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2405 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2406 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2407 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2408 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2409 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2410 } 2411} 2412 2413// Return the low-subreg of a given Q register. 2414static unsigned getDRegFromQReg(unsigned QReg) { 2415 switch (QReg) { 2416 default: llvm_unreachable("expected a Q register!"); 2417 case ARM::Q0: return ARM::D0; 2418 case ARM::Q1: return ARM::D2; 2419 case ARM::Q2: return ARM::D4; 2420 case ARM::Q3: return ARM::D6; 2421 case ARM::Q4: return ARM::D8; 2422 case ARM::Q5: return ARM::D10; 2423 case ARM::Q6: return ARM::D12; 2424 case ARM::Q7: return ARM::D14; 2425 case ARM::Q8: return ARM::D16; 2426 case ARM::Q9: return ARM::D19; 2427 case ARM::Q10: return ARM::D20; 2428 case ARM::Q11: return ARM::D22; 2429 case ARM::Q12: return ARM::D24; 2430 case ARM::Q13: return ARM::D26; 2431 case ARM::Q14: return ARM::D28; 2432 case ARM::Q15: return ARM::D30; 2433 } 2434} 2435 2436/// Parse a register list. 2437bool ARMAsmParser:: 2438parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2439 assert(Parser.getTok().is(AsmToken::LCurly) && 2440 "Token is not a Left Curly Brace"); 2441 SMLoc S = Parser.getTok().getLoc(); 2442 Parser.Lex(); // Eat '{' token. 2443 SMLoc RegLoc = Parser.getTok().getLoc(); 2444 2445 // Check the first register in the list to see what register class 2446 // this is a list of. 
2447 int Reg = tryParseRegister(); 2448 if (Reg == -1) 2449 return Error(RegLoc, "register expected"); 2450 2451 // The reglist instructions have at most 16 registers, so reserve 2452 // space for that many. 2453 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2454 2455 // Allow Q regs and just interpret them as the two D sub-registers. 2456 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2457 Reg = getDRegFromQReg(Reg); 2458 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2459 ++Reg; 2460 } 2461 const MCRegisterClass *RC; 2462 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2463 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2464 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2465 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2466 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2467 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2468 else 2469 return Error(RegLoc, "invalid register in register list"); 2470 2471 // Store the register. 2472 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2473 2474 // This starts immediately after the first register token in the list, 2475 // so we can see either a comma or a minus (range separator) as a legal 2476 // next token. 2477 while (Parser.getTok().is(AsmToken::Comma) || 2478 Parser.getTok().is(AsmToken::Minus)) { 2479 if (Parser.getTok().is(AsmToken::Minus)) { 2480 Parser.Lex(); // Eat the comma. 2481 SMLoc EndLoc = Parser.getTok().getLoc(); 2482 int EndReg = tryParseRegister(); 2483 if (EndReg == -1) 2484 return Error(EndLoc, "register expected"); 2485 // Allow Q regs and just interpret them as the two D sub-registers. 2486 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2487 EndReg = getDRegFromQReg(EndReg) + 1; 2488 // If the register is the same as the start reg, there's nothing 2489 // more to do. 2490 if (Reg == EndReg) 2491 continue; 2492 // The register must be in the same register class as the first. 
2493 if (!RC->contains(EndReg)) 2494 return Error(EndLoc, "invalid register in register list"); 2495 // Ranges must go from low to high. 2496 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2497 return Error(EndLoc, "bad range in register list"); 2498 2499 // Add all the registers in the range to the register list. 2500 while (Reg != EndReg) { 2501 Reg = getNextRegister(Reg); 2502 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2503 } 2504 continue; 2505 } 2506 Parser.Lex(); // Eat the comma. 2507 RegLoc = Parser.getTok().getLoc(); 2508 int OldReg = Reg; 2509 Reg = tryParseRegister(); 2510 if (Reg == -1) 2511 return Error(RegLoc, "register expected"); 2512 // Allow Q regs and just interpret them as the two D sub-registers. 2513 bool isQReg = false; 2514 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2515 Reg = getDRegFromQReg(Reg); 2516 isQReg = true; 2517 } 2518 // The register must be in the same register class as the first. 2519 if (!RC->contains(Reg)) 2520 return Error(RegLoc, "invalid register in register list"); 2521 // List must be monotonically increasing. 2522 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2523 return Error(RegLoc, "register list not in ascending order"); 2524 // VFP register lists must also be contiguous. 2525 // It's OK to use the enumeration values directly here rather, as the 2526 // VFP register classes have the enum sorted properly. 2527 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2528 Reg != OldReg + 1) 2529 return Error(RegLoc, "non-contiguous register range"); 2530 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2531 if (isQReg) 2532 Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc)); 2533 } 2534 2535 SMLoc E = Parser.getTok().getLoc(); 2536 if (Parser.getTok().isNot(AsmToken::RCurly)) 2537 return Error(E, "'}' expected"); 2538 Parser.Lex(); // Eat '}' token. 
2539 2540 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2541 return false; 2542} 2543 2544// parse a vector register list 2545ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2546parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2547 if(Parser.getTok().isNot(AsmToken::LCurly)) 2548 return MatchOperand_NoMatch; 2549 2550 SMLoc S = Parser.getTok().getLoc(); 2551 Parser.Lex(); // Eat '{' token. 2552 SMLoc RegLoc = Parser.getTok().getLoc(); 2553 2554 int Reg = tryParseRegister(); 2555 if (Reg == -1) { 2556 Error(RegLoc, "register expected"); 2557 return MatchOperand_ParseFail; 2558 } 2559 unsigned Count = 1; 2560 unsigned FirstReg = Reg; 2561 // The list is of D registers, but we also allow Q regs and just interpret 2562 // them as the two D sub-registers. 2563 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2564 FirstReg = Reg = getDRegFromQReg(Reg); 2565 ++Reg; 2566 ++Count; 2567 } 2568 2569 while (Parser.getTok().is(AsmToken::Comma)) { 2570 Parser.Lex(); // Eat the comma. 2571 RegLoc = Parser.getTok().getLoc(); 2572 int OldReg = Reg; 2573 Reg = tryParseRegister(); 2574 if (Reg == -1) { 2575 Error(RegLoc, "register expected"); 2576 return MatchOperand_ParseFail; 2577 } 2578 // vector register lists must be contiguous. 2579 // It's OK to use the enumeration values directly here rather, as the 2580 // VFP register classes have the enum sorted properly. 2581 // 2582 // The list is of D registers, but we also allow Q regs and just interpret 2583 // them as the two D sub-registers. 2584 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2585 Reg = getDRegFromQReg(Reg); 2586 if (Reg != OldReg + 1) { 2587 Error(RegLoc, "non-contiguous register range"); 2588 return MatchOperand_ParseFail; 2589 } 2590 ++Reg; 2591 Count += 2; 2592 continue; 2593 } 2594 // Normal D register. Just check that it's contiguous and keep going. 
2595 if (Reg != OldReg + 1) { 2596 Error(RegLoc, "non-contiguous register range"); 2597 return MatchOperand_ParseFail; 2598 } 2599 ++Count; 2600 } 2601 2602 SMLoc E = Parser.getTok().getLoc(); 2603 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2604 Error(E, "'}' expected"); 2605 return MatchOperand_ParseFail; 2606 } 2607 Parser.Lex(); // Eat '}' token. 2608 2609 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2610 return MatchOperand_Success; 2611} 2612 2613/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2614ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2615parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2616 SMLoc S = Parser.getTok().getLoc(); 2617 const AsmToken &Tok = Parser.getTok(); 2618 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2619 StringRef OptStr = Tok.getString(); 2620 2621 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2622 .Case("sy", ARM_MB::SY) 2623 .Case("st", ARM_MB::ST) 2624 .Case("sh", ARM_MB::ISH) 2625 .Case("ish", ARM_MB::ISH) 2626 .Case("shst", ARM_MB::ISHST) 2627 .Case("ishst", ARM_MB::ISHST) 2628 .Case("nsh", ARM_MB::NSH) 2629 .Case("un", ARM_MB::NSH) 2630 .Case("nshst", ARM_MB::NSHST) 2631 .Case("unst", ARM_MB::NSHST) 2632 .Case("osh", ARM_MB::OSH) 2633 .Case("oshst", ARM_MB::OSHST) 2634 .Default(~0U); 2635 2636 if (Opt == ~0U) 2637 return MatchOperand_NoMatch; 2638 2639 Parser.Lex(); // Eat identifier token. 2640 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2641 return MatchOperand_Success; 2642} 2643 2644/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 
2645ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2646parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2647 SMLoc S = Parser.getTok().getLoc(); 2648 const AsmToken &Tok = Parser.getTok(); 2649 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2650 StringRef IFlagsStr = Tok.getString(); 2651 2652 // An iflags string of "none" is interpreted to mean that none of the AIF 2653 // bits are set. Not a terribly useful instruction, but a valid encoding. 2654 unsigned IFlags = 0; 2655 if (IFlagsStr != "none") { 2656 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2657 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2658 .Case("a", ARM_PROC::A) 2659 .Case("i", ARM_PROC::I) 2660 .Case("f", ARM_PROC::F) 2661 .Default(~0U); 2662 2663 // If some specific iflag is already set, it means that some letter is 2664 // present more than once, this is not acceptable. 2665 if (Flag == ~0U || (IFlags & Flag)) 2666 return MatchOperand_NoMatch; 2667 2668 IFlags |= Flag; 2669 } 2670 } 2671 2672 Parser.Lex(); // Eat identifier token. 2673 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2674 return MatchOperand_Success; 2675} 2676 2677/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2678ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2679parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2680 SMLoc S = Parser.getTok().getLoc(); 2681 const AsmToken &Tok = Parser.getTok(); 2682 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2683 StringRef Mask = Tok.getString(); 2684 2685 if (isMClass()) { 2686 // See ARMv6-M 10.1.1 2687 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2688 .Case("apsr", 0) 2689 .Case("iapsr", 1) 2690 .Case("eapsr", 2) 2691 .Case("xpsr", 3) 2692 .Case("ipsr", 5) 2693 .Case("epsr", 6) 2694 .Case("iepsr", 7) 2695 .Case("msp", 8) 2696 .Case("psp", 9) 2697 .Case("primask", 16) 2698 .Case("basepri", 17) 2699 .Case("basepri_max", 18) 2700 .Case("faultmask", 19) 2701 .Case("control", 20) 2702 .Default(~0U); 2703 2704 if (FlagsVal == ~0U) 2705 return MatchOperand_NoMatch; 2706 2707 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2708 // basepri, basepri_max and faultmask only valid for V7m. 2709 return MatchOperand_NoMatch; 2710 2711 Parser.Lex(); // Eat identifier token. 
2712 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2713 return MatchOperand_Success; 2714 } 2715 2716 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2717 size_t Start = 0, Next = Mask.find('_'); 2718 StringRef Flags = ""; 2719 std::string SpecReg = Mask.slice(Start, Next).lower(); 2720 if (Next != StringRef::npos) 2721 Flags = Mask.slice(Next+1, Mask.size()); 2722 2723 // FlagsVal contains the complete mask: 2724 // 3-0: Mask 2725 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2726 unsigned FlagsVal = 0; 2727 2728 if (SpecReg == "apsr") { 2729 FlagsVal = StringSwitch<unsigned>(Flags) 2730 .Case("nzcvq", 0x8) // same as CPSR_f 2731 .Case("g", 0x4) // same as CPSR_s 2732 .Case("nzcvqg", 0xc) // same as CPSR_fs 2733 .Default(~0U); 2734 2735 if (FlagsVal == ~0U) { 2736 if (!Flags.empty()) 2737 return MatchOperand_NoMatch; 2738 else 2739 FlagsVal = 8; // No flag 2740 } 2741 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2742 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2743 Flags = "fc"; 2744 for (int i = 0, e = Flags.size(); i != e; ++i) { 2745 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2746 .Case("c", 1) 2747 .Case("x", 2) 2748 .Case("s", 4) 2749 .Case("f", 8) 2750 .Default(~0U); 2751 2752 // If some specific flag is already set, it means that some letter is 2753 // present more than once, this is not acceptable. 2754 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2755 return MatchOperand_NoMatch; 2756 FlagsVal |= Flag; 2757 } 2758 } else // No match for special register. 2759 return MatchOperand_NoMatch; 2760 2761 // Special register without flags is NOT equivalent to "fc" flags. 2762 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2763 // two lines would enable gas compatibility at the expense of breaking 2764 // round-tripping. 
2765 // 2766 // if (!FlagsVal) 2767 // FlagsVal = 0x9; 2768 2769 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2770 if (SpecReg == "spsr") 2771 FlagsVal |= 16; 2772 2773 Parser.Lex(); // Eat identifier token. 2774 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2775 return MatchOperand_Success; 2776} 2777 2778ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2779parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2780 int Low, int High) { 2781 const AsmToken &Tok = Parser.getTok(); 2782 if (Tok.isNot(AsmToken::Identifier)) { 2783 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2784 return MatchOperand_ParseFail; 2785 } 2786 StringRef ShiftName = Tok.getString(); 2787 std::string LowerOp = Op.lower(); 2788 std::string UpperOp = Op.upper(); 2789 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2790 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2791 return MatchOperand_ParseFail; 2792 } 2793 Parser.Lex(); // Eat shift type token. 2794 2795 // There must be a '#' and a shift amount. 2796 if (Parser.getTok().isNot(AsmToken::Hash)) { 2797 Error(Parser.getTok().getLoc(), "'#' expected"); 2798 return MatchOperand_ParseFail; 2799 } 2800 Parser.Lex(); // Eat hash token. 
2801 2802 const MCExpr *ShiftAmount; 2803 SMLoc Loc = Parser.getTok().getLoc(); 2804 if (getParser().ParseExpression(ShiftAmount)) { 2805 Error(Loc, "illegal expression"); 2806 return MatchOperand_ParseFail; 2807 } 2808 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2809 if (!CE) { 2810 Error(Loc, "constant expression expected"); 2811 return MatchOperand_ParseFail; 2812 } 2813 int Val = CE->getValue(); 2814 if (Val < Low || Val > High) { 2815 Error(Loc, "immediate value out of range"); 2816 return MatchOperand_ParseFail; 2817 } 2818 2819 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2820 2821 return MatchOperand_Success; 2822} 2823 2824ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2825parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2826 const AsmToken &Tok = Parser.getTok(); 2827 SMLoc S = Tok.getLoc(); 2828 if (Tok.isNot(AsmToken::Identifier)) { 2829 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2830 return MatchOperand_ParseFail; 2831 } 2832 int Val = StringSwitch<int>(Tok.getString()) 2833 .Case("be", 1) 2834 .Case("le", 0) 2835 .Default(-1); 2836 Parser.Lex(); // Eat the token. 2837 2838 if (Val == -1) { 2839 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2840 return MatchOperand_ParseFail; 2841 } 2842 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2843 getContext()), 2844 S, Parser.getTok().getLoc())); 2845 return MatchOperand_Success; 2846} 2847 2848/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2849/// instructions. Legal values are: 2850/// lsl #n 'n' in [0,31] 2851/// asr #n 'n' in [1,32] 2852/// n == 32 encoded as n == 0. 
2853ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2854parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2855 const AsmToken &Tok = Parser.getTok(); 2856 SMLoc S = Tok.getLoc(); 2857 if (Tok.isNot(AsmToken::Identifier)) { 2858 Error(S, "shift operator 'asr' or 'lsl' expected"); 2859 return MatchOperand_ParseFail; 2860 } 2861 StringRef ShiftName = Tok.getString(); 2862 bool isASR; 2863 if (ShiftName == "lsl" || ShiftName == "LSL") 2864 isASR = false; 2865 else if (ShiftName == "asr" || ShiftName == "ASR") 2866 isASR = true; 2867 else { 2868 Error(S, "shift operator 'asr' or 'lsl' expected"); 2869 return MatchOperand_ParseFail; 2870 } 2871 Parser.Lex(); // Eat the operator. 2872 2873 // A '#' and a shift amount. 2874 if (Parser.getTok().isNot(AsmToken::Hash)) { 2875 Error(Parser.getTok().getLoc(), "'#' expected"); 2876 return MatchOperand_ParseFail; 2877 } 2878 Parser.Lex(); // Eat hash token. 2879 2880 const MCExpr *ShiftAmount; 2881 SMLoc E = Parser.getTok().getLoc(); 2882 if (getParser().ParseExpression(ShiftAmount)) { 2883 Error(E, "malformed shift expression"); 2884 return MatchOperand_ParseFail; 2885 } 2886 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2887 if (!CE) { 2888 Error(E, "shift amount must be an immediate"); 2889 return MatchOperand_ParseFail; 2890 } 2891 2892 int64_t Val = CE->getValue(); 2893 if (isASR) { 2894 // Shift amount must be in [1,32] 2895 if (Val < 1 || Val > 32) { 2896 Error(E, "'asr' shift amount must be in range [1,32]"); 2897 return MatchOperand_ParseFail; 2898 } 2899 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2900 if (isThumb() && Val == 32) { 2901 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2902 return MatchOperand_ParseFail; 2903 } 2904 if (Val == 32) Val = 0; 2905 } else { 2906 // Shift amount must be in [1,32] 2907 if (Val < 0 || Val > 31) { 2908 Error(E, "'lsr' shift amount must be in range [0,31]"); 2909 return MatchOperand_ParseFail; 2910 } 2911 } 2912 2913 E = Parser.getTok().getLoc(); 2914 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2915 2916 return MatchOperand_Success; 2917} 2918 2919/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2920/// of instructions. Legal values are: 2921/// ror #n 'n' in {0, 8, 16, 24} 2922ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2923parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2924 const AsmToken &Tok = Parser.getTok(); 2925 SMLoc S = Tok.getLoc(); 2926 if (Tok.isNot(AsmToken::Identifier)) 2927 return MatchOperand_NoMatch; 2928 StringRef ShiftName = Tok.getString(); 2929 if (ShiftName != "ror" && ShiftName != "ROR") 2930 return MatchOperand_NoMatch; 2931 Parser.Lex(); // Eat the operator. 2932 2933 // A '#' and a rotate amount. 2934 if (Parser.getTok().isNot(AsmToken::Hash)) { 2935 Error(Parser.getTok().getLoc(), "'#' expected"); 2936 return MatchOperand_ParseFail; 2937 } 2938 Parser.Lex(); // Eat hash token. 2939 2940 const MCExpr *ShiftAmount; 2941 SMLoc E = Parser.getTok().getLoc(); 2942 if (getParser().ParseExpression(ShiftAmount)) { 2943 Error(E, "malformed rotate expression"); 2944 return MatchOperand_ParseFail; 2945 } 2946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2947 if (!CE) { 2948 Error(E, "rotate amount must be an immediate"); 2949 return MatchOperand_ParseFail; 2950 } 2951 2952 int64_t Val = CE->getValue(); 2953 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2954 // normally, zero is represented in asm by omitting the rotate operand 2955 // entirely. 
2956 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2957 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2958 return MatchOperand_ParseFail; 2959 } 2960 2961 E = Parser.getTok().getLoc(); 2962 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2963 2964 return MatchOperand_Success; 2965} 2966 2967ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2968parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2969 SMLoc S = Parser.getTok().getLoc(); 2970 // The bitfield descriptor is really two operands, the LSB and the width. 2971 if (Parser.getTok().isNot(AsmToken::Hash)) { 2972 Error(Parser.getTok().getLoc(), "'#' expected"); 2973 return MatchOperand_ParseFail; 2974 } 2975 Parser.Lex(); // Eat hash token. 2976 2977 const MCExpr *LSBExpr; 2978 SMLoc E = Parser.getTok().getLoc(); 2979 if (getParser().ParseExpression(LSBExpr)) { 2980 Error(E, "malformed immediate expression"); 2981 return MatchOperand_ParseFail; 2982 } 2983 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2984 if (!CE) { 2985 Error(E, "'lsb' operand must be an immediate"); 2986 return MatchOperand_ParseFail; 2987 } 2988 2989 int64_t LSB = CE->getValue(); 2990 // The LSB must be in the range [0,31] 2991 if (LSB < 0 || LSB > 31) { 2992 Error(E, "'lsb' operand must be in the range [0,31]"); 2993 return MatchOperand_ParseFail; 2994 } 2995 E = Parser.getTok().getLoc(); 2996 2997 // Expect another immediate operand. 2998 if (Parser.getTok().isNot(AsmToken::Comma)) { 2999 Error(Parser.getTok().getLoc(), "too few operands"); 3000 return MatchOperand_ParseFail; 3001 } 3002 Parser.Lex(); // Eat hash token. 3003 if (Parser.getTok().isNot(AsmToken::Hash)) { 3004 Error(Parser.getTok().getLoc(), "'#' expected"); 3005 return MatchOperand_ParseFail; 3006 } 3007 Parser.Lex(); // Eat hash token. 
3008 3009 const MCExpr *WidthExpr; 3010 if (getParser().ParseExpression(WidthExpr)) { 3011 Error(E, "malformed immediate expression"); 3012 return MatchOperand_ParseFail; 3013 } 3014 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3015 if (!CE) { 3016 Error(E, "'width' operand must be an immediate"); 3017 return MatchOperand_ParseFail; 3018 } 3019 3020 int64_t Width = CE->getValue(); 3021 // The LSB must be in the range [1,32-lsb] 3022 if (Width < 1 || Width > 32 - LSB) { 3023 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3024 return MatchOperand_ParseFail; 3025 } 3026 E = Parser.getTok().getLoc(); 3027 3028 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3029 3030 return MatchOperand_Success; 3031} 3032 3033ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3034parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3035 // Check for a post-index addressing register operand. Specifically: 3036 // postidx_reg := '+' register {, shift} 3037 // | '-' register {, shift} 3038 // | register {, shift} 3039 3040 // This method must return MatchOperand_NoMatch without consuming any tokens 3041 // in the case where there is no match, as other alternatives take other 3042 // parse methods. 3043 AsmToken Tok = Parser.getTok(); 3044 SMLoc S = Tok.getLoc(); 3045 bool haveEaten = false; 3046 bool isAdd = true; 3047 int Reg = -1; 3048 if (Tok.is(AsmToken::Plus)) { 3049 Parser.Lex(); // Eat the '+' token. 3050 haveEaten = true; 3051 } else if (Tok.is(AsmToken::Minus)) { 3052 Parser.Lex(); // Eat the '-' token. 
3053 isAdd = false; 3054 haveEaten = true; 3055 } 3056 if (Parser.getTok().is(AsmToken::Identifier)) 3057 Reg = tryParseRegister(); 3058 if (Reg == -1) { 3059 if (!haveEaten) 3060 return MatchOperand_NoMatch; 3061 Error(Parser.getTok().getLoc(), "register expected"); 3062 return MatchOperand_ParseFail; 3063 } 3064 SMLoc E = Parser.getTok().getLoc(); 3065 3066 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3067 unsigned ShiftImm = 0; 3068 if (Parser.getTok().is(AsmToken::Comma)) { 3069 Parser.Lex(); // Eat the ','. 3070 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3071 return MatchOperand_ParseFail; 3072 } 3073 3074 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3075 ShiftImm, S, E)); 3076 3077 return MatchOperand_Success; 3078} 3079 3080ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3081parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3082 // Check for a post-index addressing register operand. Specifically: 3083 // am3offset := '+' register 3084 // | '-' register 3085 // | register 3086 // | # imm 3087 // | # + imm 3088 // | # - imm 3089 3090 // This method must return MatchOperand_NoMatch without consuming any tokens 3091 // in the case where there is no match, as other alternatives take other 3092 // parse methods. 3093 AsmToken Tok = Parser.getTok(); 3094 SMLoc S = Tok.getLoc(); 3095 3096 // Do immediates first, as we always parse those if we have a '#'. 3097 if (Parser.getTok().is(AsmToken::Hash)) { 3098 Parser.Lex(); // Eat the '#'. 3099 // Explicitly look for a '-', as we need to encode negative zero 3100 // differently. 
3101 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3102 const MCExpr *Offset; 3103 if (getParser().ParseExpression(Offset)) 3104 return MatchOperand_ParseFail; 3105 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3106 if (!CE) { 3107 Error(S, "constant expression expected"); 3108 return MatchOperand_ParseFail; 3109 } 3110 SMLoc E = Tok.getLoc(); 3111 // Negative zero is encoded as the flag value INT32_MIN. 3112 int32_t Val = CE->getValue(); 3113 if (isNegative && Val == 0) 3114 Val = INT32_MIN; 3115 3116 Operands.push_back( 3117 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3118 3119 return MatchOperand_Success; 3120 } 3121 3122 3123 bool haveEaten = false; 3124 bool isAdd = true; 3125 int Reg = -1; 3126 if (Tok.is(AsmToken::Plus)) { 3127 Parser.Lex(); // Eat the '+' token. 3128 haveEaten = true; 3129 } else if (Tok.is(AsmToken::Minus)) { 3130 Parser.Lex(); // Eat the '-' token. 3131 isAdd = false; 3132 haveEaten = true; 3133 } 3134 if (Parser.getTok().is(AsmToken::Identifier)) 3135 Reg = tryParseRegister(); 3136 if (Reg == -1) { 3137 if (!haveEaten) 3138 return MatchOperand_NoMatch; 3139 Error(Parser.getTok().getLoc(), "register expected"); 3140 return MatchOperand_ParseFail; 3141 } 3142 SMLoc E = Parser.getTok().getLoc(); 3143 3144 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3145 0, S, E)); 3146 3147 return MatchOperand_Success; 3148} 3149 3150/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3151/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3152/// when they refer multiple MIOperands inside a single one. 3153bool ARMAsmParser:: 3154cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3155 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3156 // Rt, Rt2 3157 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3158 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3159 // Create a writeback register dummy placeholder. 
3160 Inst.addOperand(MCOperand::CreateReg(0)); 3161 // addr 3162 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3163 // pred 3164 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3165 return true; 3166} 3167 3168/// cvtT2StrdPre - Convert parsed operands to MCInst. 3169/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3170/// when they refer multiple MIOperands inside a single one. 3171bool ARMAsmParser:: 3172cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3173 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3174 // Create a writeback register dummy placeholder. 3175 Inst.addOperand(MCOperand::CreateReg(0)); 3176 // Rt, Rt2 3177 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3178 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3179 // addr 3180 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3181 // pred 3182 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3183 return true; 3184} 3185 3186/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3187/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3188/// when they refer multiple MIOperands inside a single one. 3189bool ARMAsmParser:: 3190cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3191 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3192 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3193 3194 // Create a writeback register dummy placeholder. 3195 Inst.addOperand(MCOperand::CreateImm(0)); 3196 3197 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3198 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3199 return true; 3200} 3201 3202/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3203/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3204/// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  // Rd
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Optional CPSR def for the flag-setting variant.
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
      ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rm is tied to Rd: duplicate operand 0.
  Inst.addOperand(Inst.getOperand(0));
  // pred
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression, return false if successful else return true
/// or an error.  The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // "[Rn]" — no offset at all.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Alignment is specified in bits in the assembly; the operand stores it
    // in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
    case 64:  Align = 8;  break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    // Remember an explicit '-' so "#-0" can be distinguished from "#0" below.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// Returns true on error (via Error()) and false on success, per the usual
/// parser convention (see the callers, which treat true as a parse failure).
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  if (ShiftName == "lsl" || ShiftName == "LSL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
3725 // lsl, ror: 0 <= imm <= 31 3726 // lsr, asr: 0 <= imm <= 32 3727 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3728 if (!CE) 3729 return Error(Loc, "shift amount must be an immediate"); 3730 int64_t Imm = CE->getValue(); 3731 if (Imm < 0 || 3732 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3733 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3734 return Error(Loc, "immediate shift value out of range"); 3735 Amount = Imm; 3736 } 3737 3738 return false; 3739} 3740 3741/// parseFPImm - A floating point immediate expression operand. 3742ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3743parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3744 SMLoc S = Parser.getTok().getLoc(); 3745 3746 if (Parser.getTok().isNot(AsmToken::Hash)) 3747 return MatchOperand_NoMatch; 3748 3749 // Disambiguate the VMOV forms that can accept an FP immediate. 3750 // vmov.f32 <sreg>, #imm 3751 // vmov.f64 <dreg>, #imm 3752 // vmov.f32 <dreg>, #imm @ vector f32x2 3753 // vmov.f32 <qreg>, #imm @ vector f32x4 3754 // 3755 // There are also the NEON VMOV instructions which expect an 3756 // integer constant. Make sure we don't try to parse an FPImm 3757 // for these: 3758 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3759 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3760 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3761 TyOp->getToken() != ".f64")) 3762 return MatchOperand_NoMatch; 3763 3764 Parser.Lex(); // Eat the '#'. 3765 3766 // Handle negation, as that still comes through as a separate token. 3767 bool isNegative = false; 3768 if (Parser.getTok().is(AsmToken::Minus)) { 3769 isNegative = true; 3770 Parser.Lex(); 3771 } 3772 const AsmToken &Tok = Parser.getTok(); 3773 if (Tok.is(AsmToken::Real)) { 3774 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3775 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3776 // If we had a '-' in front, toggle the sign bit. 
3777 IntVal ^= (uint64_t)isNegative << 63; 3778 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3779 Parser.Lex(); // Eat the token. 3780 if (Val == -1) { 3781 TokError("floating point value out of range"); 3782 return MatchOperand_ParseFail; 3783 } 3784 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3785 return MatchOperand_Success; 3786 } 3787 if (Tok.is(AsmToken::Integer)) { 3788 int64_t Val = Tok.getIntVal(); 3789 Parser.Lex(); // Eat the token. 3790 if (Val > 255 || Val < 0) { 3791 TokError("encoded floating point value out of range"); 3792 return MatchOperand_ParseFail; 3793 } 3794 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3795 return MatchOperand_Success; 3796 } 3797 3798 TokError("invalid floating point immediate"); 3799 return MatchOperand_ParseFail; 3800} 3801/// Parse a arm instruction operand. For now this parses the operand regardless 3802/// of the mnemonic. 3803bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3804 StringRef Mnemonic) { 3805 SMLoc S, E; 3806 3807 // Check if the current operand has a custom associated parser, if so, try to 3808 // custom parse the operand, or fallback to the general approach. 3809 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3810 if (ResTy == MatchOperand_Success) 3811 return false; 3812 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3813 // there was a match, but an error occurred, in which case, just return that 3814 // the operand parsing failed. 3815 if (ResTy == MatchOperand_ParseFail) 3816 return true; 3817 3818 switch (getLexer().getKind()) { 3819 default: 3820 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3821 return true; 3822 case AsmToken::Identifier: { 3823 // If this is VMRS, check for the apsr_nzcv operand. 
3824 if (!tryParseRegisterWithWriteBack(Operands)) 3825 return false; 3826 int Res = tryParseShiftRegister(Operands); 3827 if (Res == 0) // success 3828 return false; 3829 else if (Res == -1) // irrecoverable error 3830 return true; 3831 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3832 S = Parser.getTok().getLoc(); 3833 Parser.Lex(); 3834 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3835 return false; 3836 } 3837 3838 // Fall though for the Identifier case that is not a register or a 3839 // special name. 3840 } 3841 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) 3842 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3843 case AsmToken::String: // quoted label names. 3844 case AsmToken::Dot: { // . as a branch target 3845 // This was not a register so parse other operands that start with an 3846 // identifier (like labels) as expressions and create them as immediates. 3847 const MCExpr *IdVal; 3848 S = Parser.getTok().getLoc(); 3849 if (getParser().ParseExpression(IdVal)) 3850 return true; 3851 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3852 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3853 return false; 3854 } 3855 case AsmToken::LBrac: 3856 return parseMemory(Operands); 3857 case AsmToken::LCurly: 3858 return parseRegisterList(Operands); 3859 case AsmToken::Hash: { 3860 // #42 -> immediate. 
3861 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3862 S = Parser.getTok().getLoc(); 3863 Parser.Lex(); 3864 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3865 const MCExpr *ImmVal; 3866 if (getParser().ParseExpression(ImmVal)) 3867 return true; 3868 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3869 if (CE) { 3870 int32_t Val = CE->getValue(); 3871 if (isNegative && Val == 0) 3872 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3873 } 3874 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3875 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3876 return false; 3877 } 3878 case AsmToken::Colon: { 3879 // ":lower16:" and ":upper16:" expression prefixes 3880 // FIXME: Check it's an expression prefix, 3881 // e.g. (FOO - :lower16:BAR) isn't legal. 3882 ARMMCExpr::VariantKind RefKind; 3883 if (parsePrefix(RefKind)) 3884 return true; 3885 3886 const MCExpr *SubExprVal; 3887 if (getParser().ParseExpression(SubExprVal)) 3888 return true; 3889 3890 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3891 getContext()); 3892 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3893 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3894 return false; 3895 } 3896 } 3897} 3898 3899// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3900// :lower16: and :upper16:. 
// Parses the relocation-specifier prefix that must already be positioned on a
// ':' token.  On success, RefKind holds the variant and the closing ':' has
// been consumed; on failure an error has been emitted and true is returned.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex(); // Eat the identifier.

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // NOTE: StringRef::substr clamps an out-of-range start, so mnemonics
  // shorter than two characters simply produce no condition-code match here.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
4031void ARMAsmParser:: 4032getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4033 bool &CanAcceptPredicationCode) { 4034 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4035 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4036 Mnemonic == "add" || Mnemonic == "adc" || 4037 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4038 Mnemonic == "orr" || Mnemonic == "mvn" || 4039 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4040 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4041 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4042 Mnemonic == "mla" || Mnemonic == "smlal" || 4043 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4044 CanAcceptCarrySet = true; 4045 } else 4046 CanAcceptCarrySet = false; 4047 4048 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4049 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4050 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4051 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4052 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4053 (Mnemonic == "clrex" && !isThumb()) || 4054 (Mnemonic == "nop" && isThumbOne()) || 4055 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4056 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4057 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4058 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4059 !isThumb()) || 4060 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4061 CanAcceptPredicationCode = false; 4062 } else 4063 CanAcceptPredicationCode = true; 4064 4065 if (isThumb()) { 4066 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4067 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4068 CanAcceptPredicationCode = false; 4069 } 4070} 4071 4072bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4073 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4074 // FIXME: This is all horribly hacky. We really need a better way to deal 4075 // with optional operands like this in the matcher table. 4076 4077 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4078 // another does not. Specifically, the MOVW instruction does not. So we 4079 // special case it here and remove the defaulted (non-setting) cc_out 4080 // operand if that's the instruction we're trying to match. 4081 // 4082 // We do this as post-processing of the explicit operands rather than just 4083 // conditionally adding the cc_out in the first place because we need 4084 // to check the type of the parsed immediate operand. 4085 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4086 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4087 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4088 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4089 return true; 4090 4091 // Register-register 'add' for thumb does not have a cc_out operand 4092 // when there are only two register operands. 4093 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4094 static_cast<ARMOperand*>(Operands[3])->isReg() && 4095 static_cast<ARMOperand*>(Operands[4])->isReg() && 4096 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4097 return true; 4098 // Register-register 'add' for thumb does not have a cc_out operand 4099 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4100 // have to check the immediate range here since Thumb2 has a variant 4101 // that can handle a different range and has a cc_out operand. 
4102 if (((isThumb() && Mnemonic == "add") || 4103 (isThumbTwo() && Mnemonic == "sub")) && 4104 Operands.size() == 6 && 4105 static_cast<ARMOperand*>(Operands[3])->isReg() && 4106 static_cast<ARMOperand*>(Operands[4])->isReg() && 4107 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4108 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4109 (static_cast<ARMOperand*>(Operands[5])->isReg() || 4110 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4111 return true; 4112 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4113 // imm0_4095 variant. That's the least-preferred variant when 4114 // selecting via the generic "add" mnemonic, so to know that we 4115 // should remove the cc_out operand, we have to explicitly check that 4116 // it's not one of the other variants. Ugh. 4117 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4118 Operands.size() == 6 && 4119 static_cast<ARMOperand*>(Operands[3])->isReg() && 4120 static_cast<ARMOperand*>(Operands[4])->isReg() && 4121 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4122 // Nest conditions rather than one big 'if' statement for readability. 4123 // 4124 // If either register is a high reg, it's either one of the SP 4125 // variants (handled above) or a 32-bit encoding, so we just 4126 // check against T3. 4127 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4128 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4129 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4130 return false; 4131 // If both registers are low, we're in an IT block, and the immediate is 4132 // in range, we should use encoding T1 instead, which has a cc_out. 
4133 if (inITBlock() && 4134 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4135 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4136 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4137 return false; 4138 4139 // Otherwise, we use encoding T4, which does not have a cc_out 4140 // operand. 4141 return true; 4142 } 4143 4144 // The thumb2 multiply instruction doesn't have a CCOut register, so 4145 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4146 // use the 16-bit encoding or not. 4147 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4148 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4149 static_cast<ARMOperand*>(Operands[3])->isReg() && 4150 static_cast<ARMOperand*>(Operands[4])->isReg() && 4151 static_cast<ARMOperand*>(Operands[5])->isReg() && 4152 // If the registers aren't low regs, the destination reg isn't the 4153 // same as one of the source regs, or the cc_out operand is zero 4154 // outside of an IT block, we have to use the 32-bit encoding, so 4155 // remove the cc_out operand. 4156 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4157 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4158 !inITBlock() || 4159 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4160 static_cast<ARMOperand*>(Operands[5])->getReg() && 4161 static_cast<ARMOperand*>(Operands[3])->getReg() != 4162 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4163 return true; 4164 4165 4166 4167 // Register-register 'add/sub' for thumb does not have a cc_out operand 4168 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4169 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4170 // right, this will result in better diagnostics (which operand is off) 4171 // anyway. 
4172 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4173 (Operands.size() == 5 || Operands.size() == 6) && 4174 static_cast<ARMOperand*>(Operands[3])->isReg() && 4175 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4176 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4177 return true; 4178 4179 return false; 4180} 4181 4182/// Parse an arm instruction mnemonic followed by its operands. 4183bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, 4184 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4185 // Create the leading tokens for the mnemonic, split by '.' characters. 4186 size_t Start = 0, Next = Name.find('.'); 4187 StringRef Mnemonic = Name.slice(Start, Next); 4188 4189 // Split out the predication code and carry setting flag from the mnemonic. 4190 unsigned PredicationCode; 4191 unsigned ProcessorIMod; 4192 bool CarrySetting; 4193 StringRef ITMask; 4194 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 4195 ProcessorIMod, ITMask); 4196 4197 // In Thumb1, only the branch (B) instruction can be predicated. 4198 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 4199 Parser.EatToEndOfStatement(); 4200 return Error(NameLoc, "conditional execution not supported in Thumb1"); 4201 } 4202 4203 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 4204 4205 // Handle the IT instruction ITMask. Convert it to a bitmask. This 4206 // is the mask as it will be for the IT encoding if the conditional 4207 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 4208 // where the conditional bit0 is zero, the instruction post-processing 4209 // will adjust the mask accordingly. 
4210 if (Mnemonic == "it") { 4211 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 4212 if (ITMask.size() > 3) { 4213 Parser.EatToEndOfStatement(); 4214 return Error(Loc, "too many conditions on IT instruction"); 4215 } 4216 unsigned Mask = 8; 4217 for (unsigned i = ITMask.size(); i != 0; --i) { 4218 char pos = ITMask[i - 1]; 4219 if (pos != 't' && pos != 'e') { 4220 Parser.EatToEndOfStatement(); 4221 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 4222 } 4223 Mask >>= 1; 4224 if (ITMask[i - 1] == 't') 4225 Mask |= 8; 4226 } 4227 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 4228 } 4229 4230 // FIXME: This is all a pretty gross hack. We should automatically handle 4231 // optional operands like this via tblgen. 4232 4233 // Next, add the CCOut and ConditionCode operands, if needed. 4234 // 4235 // For mnemonics which can ever incorporate a carry setting bit or predication 4236 // code, our matching model involves us always generating CCOut and 4237 // ConditionCode operands to match the mnemonic "as written" and then we let 4238 // the matcher deal with finding the right instruction or generating an 4239 // appropriate error. 4240 bool CanAcceptCarrySet, CanAcceptPredicationCode; 4241 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode); 4242 4243 // If we had a carry-set on an instruction that can't do that, issue an 4244 // error. 4245 if (!CanAcceptCarrySet && CarrySetting) { 4246 Parser.EatToEndOfStatement(); 4247 return Error(NameLoc, "instruction '" + Mnemonic + 4248 "' can not set flags, but 's' suffix specified"); 4249 } 4250 // If we had a predication code on an instruction that can't do that, issue an 4251 // error. 
4252 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 4253 Parser.EatToEndOfStatement(); 4254 return Error(NameLoc, "instruction '" + Mnemonic + 4255 "' is not predicable, but condition code specified"); 4256 } 4257 4258 // Add the carry setting operand, if necessary. 4259 if (CanAcceptCarrySet) { 4260 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 4261 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 4262 Loc)); 4263 } 4264 4265 // Add the predication code operand, if necessary. 4266 if (CanAcceptPredicationCode) { 4267 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 4268 CarrySetting); 4269 Operands.push_back(ARMOperand::CreateCondCode( 4270 ARMCC::CondCodes(PredicationCode), Loc)); 4271 } 4272 4273 // Add the processor imod operand, if necessary. 4274 if (ProcessorIMod) { 4275 Operands.push_back(ARMOperand::CreateImm( 4276 MCConstantExpr::Create(ProcessorIMod, getContext()), 4277 NameLoc, NameLoc)); 4278 } 4279 4280 // Add the remaining tokens in the mnemonic. 4281 while (Next != StringRef::npos) { 4282 Start = Next; 4283 Next = Name.find('.', Start + 1); 4284 StringRef ExtraToken = Name.slice(Start, Next); 4285 4286 if (ExtraToken != ".n") { 4287 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 4288 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 4289 } 4290 } 4291 4292 // Read the remaining operands. 4293 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4294 // Read the first operand. 4295 if (parseOperand(Operands, Mnemonic)) { 4296 Parser.EatToEndOfStatement(); 4297 return true; 4298 } 4299 4300 while (getLexer().is(AsmToken::Comma)) { 4301 Parser.Lex(); // Eat the comma. 4302 4303 // Parse and remember the operand. 
4304 if (parseOperand(Operands, Mnemonic)) { 4305 Parser.EatToEndOfStatement(); 4306 return true; 4307 } 4308 } 4309 } 4310 4311 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4312 SMLoc Loc = getLexer().getLoc(); 4313 Parser.EatToEndOfStatement(); 4314 return Error(Loc, "unexpected token in argument list"); 4315 } 4316 4317 Parser.Lex(); // Consume the EndOfStatement 4318 4319 // Some instructions, mostly Thumb, have forms for the same mnemonic that 4320 // do and don't have a cc_out optional-def operand. With some spot-checks 4321 // of the operand list, we can figure out which variant we're trying to 4322 // parse and adjust accordingly before actually matching. We shouldn't ever 4323 // try to remove a cc_out operand that was explicitly set on the the 4324 // mnemonic, of course (CarrySetting == true). Reason number #317 the 4325 // table driven matcher doesn't fit well with the ARM instruction set. 4326 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { 4327 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4328 Operands.erase(Operands.begin() + 1); 4329 delete Op; 4330 } 4331 4332 // ARM mode 'blx' need special handling, as the register operand version 4333 // is predicable, but the label operand version is not. So, we can't rely 4334 // on the Mnemonic based checking to correctly figure out when to put 4335 // a k_CondCode operand in the list. If we're trying to match the label 4336 // version, remove the k_CondCode operand here. 4337 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && 4338 static_cast<ARMOperand*>(Operands[2])->isImm()) { 4339 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4340 Operands.erase(Operands.begin() + 1); 4341 delete Op; 4342 } 4343 4344 // The vector-compare-to-zero instructions have a literal token "#0" at 4345 // the end that comes to here as an immediate operand. Convert it to a 4346 // token to play nicely with the matcher. 
4347 if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" || 4348 Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 && 4349 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4350 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4351 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4352 if (CE && CE->getValue() == 0) { 4353 Operands.erase(Operands.begin() + 5); 4354 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4355 delete Op; 4356 } 4357 } 4358 // VCMP{E} does the same thing, but with a different operand count. 4359 if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 && 4360 static_cast<ARMOperand*>(Operands[4])->isImm()) { 4361 ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]); 4362 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4363 if (CE && CE->getValue() == 0) { 4364 Operands.erase(Operands.begin() + 4); 4365 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4366 delete Op; 4367 } 4368 } 4369 // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the 4370 // end. Convert it to a token here. 4371 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 && 4372 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4373 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4374 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4375 if (CE && CE->getValue() == 0) { 4376 Operands.erase(Operands.begin() + 5); 4377 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4378 delete Op; 4379 } 4380 } 4381 4382 return false; 4383} 4384 4385// Validate context-sensitive operand constraints. 4386 4387// return 'true' if register list contains non-low GPR registers, 4388// 'false' otherwise. If Reg is in the register list or is HiReg, set 4389// 'containsReg' to true. 
4390static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4391 unsigned HiReg, bool &containsReg) { 4392 containsReg = false; 4393 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4394 unsigned OpReg = Inst.getOperand(i).getReg(); 4395 if (OpReg == Reg) 4396 containsReg = true; 4397 // Anything other than a low register isn't legal here. 4398 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4399 return true; 4400 } 4401 return false; 4402} 4403 4404// Check if the specified regisgter is in the register list of the inst, 4405// starting at the indicated operand number. 4406static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4407 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4408 unsigned OpReg = Inst.getOperand(i).getReg(); 4409 if (OpReg == Reg) 4410 return true; 4411 } 4412 return false; 4413} 4414 4415// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4416// the ARMInsts array) instead. Getting that here requires awkward 4417// API changes, though. Better way? 4418namespace llvm { 4419extern const MCInstrDesc ARMInsts[]; 4420} 4421static const MCInstrDesc &getInstDesc(unsigned Opcode) { 4422 return ARMInsts[Opcode]; 4423} 4424 4425// FIXME: We would really like to be able to tablegen'erate this. 4426bool ARMAsmParser:: 4427validateInstruction(MCInst &Inst, 4428 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4429 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4430 SMLoc Loc = Operands[0]->getStartLoc(); 4431 // Check the IT block state first. 4432 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4433 // being allowed in IT blocks, but not being predicable. It just always 4434 // executes. 
4435 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4436 unsigned bit = 1; 4437 if (ITState.FirstCond) 4438 ITState.FirstCond = false; 4439 else 4440 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4441 // The instruction must be predicable. 4442 if (!MCID.isPredicable()) 4443 return Error(Loc, "instructions in IT block must be predicable"); 4444 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4445 unsigned ITCond = bit ? ITState.Cond : 4446 ARMCC::getOppositeCondition(ITState.Cond); 4447 if (Cond != ITCond) { 4448 // Find the condition code Operand to get its SMLoc information. 4449 SMLoc CondLoc; 4450 for (unsigned i = 1; i < Operands.size(); ++i) 4451 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4452 CondLoc = Operands[i]->getStartLoc(); 4453 return Error(CondLoc, "incorrect condition in IT block; got '" + 4454 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4455 "', but expected '" + 4456 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4457 } 4458 // Check for non-'al' condition codes outside of the IT block. 4459 } else if (isThumbTwo() && MCID.isPredicable() && 4460 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4461 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4462 Inst.getOpcode() != ARM::t2B) 4463 return Error(Loc, "predicated instructions must be in IT block"); 4464 4465 switch (Inst.getOpcode()) { 4466 case ARM::LDRD: 4467 case ARM::LDRD_PRE: 4468 case ARM::LDRD_POST: 4469 case ARM::LDREXD: { 4470 // Rt2 must be Rt + 1. 4471 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4472 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4473 if (Rt2 != Rt + 1) 4474 return Error(Operands[3]->getStartLoc(), 4475 "destination operands must be sequential"); 4476 return false; 4477 } 4478 case ARM::STRD: { 4479 // Rt2 must be Rt + 1. 
4480 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4481 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4482 if (Rt2 != Rt + 1) 4483 return Error(Operands[3]->getStartLoc(), 4484 "source operands must be sequential"); 4485 return false; 4486 } 4487 case ARM::STRD_PRE: 4488 case ARM::STRD_POST: 4489 case ARM::STREXD: { 4490 // Rt2 must be Rt + 1. 4491 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4492 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4493 if (Rt2 != Rt + 1) 4494 return Error(Operands[3]->getStartLoc(), 4495 "source operands must be sequential"); 4496 return false; 4497 } 4498 case ARM::SBFX: 4499 case ARM::UBFX: { 4500 // width must be in range [1, 32-lsb] 4501 unsigned lsb = Inst.getOperand(2).getImm(); 4502 unsigned widthm1 = Inst.getOperand(3).getImm(); 4503 if (widthm1 >= 32 - lsb) 4504 return Error(Operands[5]->getStartLoc(), 4505 "bitfield width must be in range [1,32-lsb]"); 4506 return false; 4507 } 4508 case ARM::tLDMIA: { 4509 // If we're parsing Thumb2, the .w variant is available and handles 4510 // most cases that are normally illegal for a Thumb1 LDM 4511 // instruction. We'll make the transformation in processInstruction() 4512 // if necessary. 4513 // 4514 // Thumb LDM instructions are writeback iff the base register is not 4515 // in the register list. 4516 unsigned Rn = Inst.getOperand(0).getReg(); 4517 bool hasWritebackToken = 4518 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4519 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4520 bool listContainsBase; 4521 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4522 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4523 "registers must be in range r0-r7"); 4524 // If we should have writeback, then there should be a '!' token. 
4525 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4526 return Error(Operands[2]->getStartLoc(), 4527 "writeback operator '!' expected"); 4528 // If we should not have writeback, there must not be a '!'. This is 4529 // true even for the 32-bit wide encodings. 4530 if (listContainsBase && hasWritebackToken) 4531 return Error(Operands[3]->getStartLoc(), 4532 "writeback operator '!' not allowed when base register " 4533 "in register list"); 4534 4535 break; 4536 } 4537 case ARM::t2LDMIA_UPD: { 4538 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4539 return Error(Operands[4]->getStartLoc(), 4540 "writeback operator '!' not allowed when base register " 4541 "in register list"); 4542 break; 4543 } 4544 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 4545 // so only issue a diagnostic for thumb1. The instructions will be 4546 // switched to the t2 encodings in processInstruction() if necessary. 4547 case ARM::tPOP: { 4548 bool listContainsBase; 4549 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) && 4550 !isThumbTwo()) 4551 return Error(Operands[2]->getStartLoc(), 4552 "registers must be in range r0-r7 or pc"); 4553 break; 4554 } 4555 case ARM::tPUSH: { 4556 bool listContainsBase; 4557 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) && 4558 !isThumbTwo()) 4559 return Error(Operands[2]->getStartLoc(), 4560 "registers must be in range r0-r7 or lr"); 4561 break; 4562 } 4563 case ARM::tSTMIA_UPD: { 4564 bool listContainsBase; 4565 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4566 return Error(Operands[4]->getStartLoc(), 4567 "registers must be in range r0-r7"); 4568 break; 4569 } 4570 } 4571 4572 return false; 4573} 4574 4575bool ARMAsmParser:: 4576processInstruction(MCInst &Inst, 4577 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4578 switch (Inst.getOpcode()) { 4579 // Handle the MOV complex aliases. 
4580 case ARM::ASRi: 4581 case ARM::LSRi: 4582 case ARM::LSLi: 4583 case ARM::RORi: { 4584 ARM_AM::ShiftOpc ShiftTy; 4585 unsigned Amt = Inst.getOperand(2).getImm(); 4586 switch(Inst.getOpcode()) { 4587 default: llvm_unreachable("unexpected opcode!"); 4588 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 4589 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 4590 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 4591 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 4592 } 4593 // A shift by zero is a plain MOVr, not a MOVsi. 4594 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi; 4595 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 4596 MCInst TmpInst; 4597 TmpInst.setOpcode(Opc); 4598 TmpInst.addOperand(Inst.getOperand(0)); // Rd 4599 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4600 if (Opc == ARM::MOVsi) 4601 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 4602 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 4603 TmpInst.addOperand(Inst.getOperand(4)); 4604 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 4605 Inst = TmpInst; 4606 return true; 4607 } 4608 case ARM::t2LDMIA_UPD: { 4609 // If this is a load of a single register, then we should use 4610 // a post-indexed LDR instruction instead, per the ARM ARM. 4611 if (Inst.getNumOperands() != 5) 4612 return false; 4613 MCInst TmpInst; 4614 TmpInst.setOpcode(ARM::t2LDR_POST); 4615 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4616 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4617 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4618 TmpInst.addOperand(MCOperand::CreateImm(4)); 4619 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4620 TmpInst.addOperand(Inst.getOperand(3)); 4621 Inst = TmpInst; 4622 return true; 4623 } 4624 case ARM::t2STMDB_UPD: { 4625 // If this is a store of a single register, then we should use 4626 // a pre-indexed STR instruction instead, per the ARM ARM. 
4627 if (Inst.getNumOperands() != 5) 4628 return false; 4629 MCInst TmpInst; 4630 TmpInst.setOpcode(ARM::t2STR_PRE); 4631 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4632 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4633 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4634 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4635 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4636 TmpInst.addOperand(Inst.getOperand(3)); 4637 Inst = TmpInst; 4638 return true; 4639 } 4640 case ARM::LDMIA_UPD: 4641 // If this is a load of a single register via a 'pop', then we should use 4642 // a post-indexed LDR instruction instead, per the ARM ARM. 4643 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4644 Inst.getNumOperands() == 5) { 4645 MCInst TmpInst; 4646 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4647 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4648 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4649 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4650 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4651 TmpInst.addOperand(MCOperand::CreateImm(4)); 4652 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4653 TmpInst.addOperand(Inst.getOperand(3)); 4654 Inst = TmpInst; 4655 return true; 4656 } 4657 break; 4658 case ARM::STMDB_UPD: 4659 // If this is a store of a single register via a 'push', then we should use 4660 // a pre-indexed STR instruction instead, per the ARM ARM. 
4661 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4662 Inst.getNumOperands() == 5) { 4663 MCInst TmpInst; 4664 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4665 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4666 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4667 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4668 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4669 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4670 TmpInst.addOperand(Inst.getOperand(3)); 4671 Inst = TmpInst; 4672 } 4673 break; 4674 case ARM::tADDi8: 4675 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4676 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4677 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4678 // to encoding T1 if <Rd> is omitted." 4679 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 4680 Inst.setOpcode(ARM::tADDi3); 4681 return true; 4682 } 4683 break; 4684 case ARM::tSUBi8: 4685 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4686 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4687 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4688 // to encoding T1 if <Rd> is omitted." 4689 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 4690 Inst.setOpcode(ARM::tSUBi3); 4691 return true; 4692 } 4693 break; 4694 case ARM::tB: 4695 // A Thumb conditional branch outside of an IT block is a tBcc. 4696 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 4697 Inst.setOpcode(ARM::tBcc); 4698 return true; 4699 } 4700 break; 4701 case ARM::t2B: 4702 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4703 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 4704 Inst.setOpcode(ARM::t2Bcc); 4705 return true; 4706 } 4707 break; 4708 case ARM::t2Bcc: 4709 // If the conditional is AL or we're in an IT block, we really want t2B. 
4710 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 4711 Inst.setOpcode(ARM::t2B); 4712 return true; 4713 } 4714 break; 4715 case ARM::tBcc: 4716 // If the conditional is AL, we really want tB. 4717 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 4718 Inst.setOpcode(ARM::tB); 4719 return true; 4720 } 4721 break; 4722 case ARM::tLDMIA: { 4723 // If the register list contains any high registers, or if the writeback 4724 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4725 // instead if we're in Thumb2. Otherwise, this should have generated 4726 // an error in validateInstruction(). 4727 unsigned Rn = Inst.getOperand(0).getReg(); 4728 bool hasWritebackToken = 4729 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4730 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4731 bool listContainsBase; 4732 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4733 (!listContainsBase && !hasWritebackToken) || 4734 (listContainsBase && hasWritebackToken)) { 4735 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4736 assert (isThumbTwo()); 4737 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4738 // If we're switching to the updating version, we need to insert 4739 // the writeback tied operand. 4740 if (hasWritebackToken) 4741 Inst.insert(Inst.begin(), 4742 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4743 return true; 4744 } 4745 break; 4746 } 4747 case ARM::tSTMIA_UPD: { 4748 // If the register list contains any high registers, we need to use 4749 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4750 // should have generated an error in validateInstruction(). 4751 unsigned Rn = Inst.getOperand(0).getReg(); 4752 bool listContainsBase; 4753 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4754 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 
4755 assert (isThumbTwo()); 4756 Inst.setOpcode(ARM::t2STMIA_UPD); 4757 return true; 4758 } 4759 break; 4760 } 4761 case ARM::tPOP: { 4762 bool listContainsBase; 4763 // If the register list contains any high registers, we need to use 4764 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4765 // should have generated an error in validateInstruction(). 4766 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 4767 return false; 4768 assert (isThumbTwo()); 4769 Inst.setOpcode(ARM::t2LDMIA_UPD); 4770 // Add the base register and writeback operands. 4771 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4772 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4773 return true; 4774 } 4775 case ARM::tPUSH: { 4776 bool listContainsBase; 4777 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 4778 return false; 4779 assert (isThumbTwo()); 4780 Inst.setOpcode(ARM::t2STMDB_UPD); 4781 // Add the base register and writeback operands. 4782 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4783 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4784 return true; 4785 } 4786 case ARM::t2MOVi: { 4787 // If we can use the 16-bit encoding and the user didn't explicitly 4788 // request the 32-bit variant, transform it here. 4789 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4790 Inst.getOperand(1).getImm() <= 255 && 4791 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4792 Inst.getOperand(4).getReg() == ARM::CPSR) || 4793 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4794 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4795 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4796 // The operands aren't in the same order for tMOVi8... 
4797 MCInst TmpInst; 4798 TmpInst.setOpcode(ARM::tMOVi8); 4799 TmpInst.addOperand(Inst.getOperand(0)); 4800 TmpInst.addOperand(Inst.getOperand(4)); 4801 TmpInst.addOperand(Inst.getOperand(1)); 4802 TmpInst.addOperand(Inst.getOperand(2)); 4803 TmpInst.addOperand(Inst.getOperand(3)); 4804 Inst = TmpInst; 4805 return true; 4806 } 4807 break; 4808 } 4809 case ARM::t2MOVr: { 4810 // If we can use the 16-bit encoding and the user didn't explicitly 4811 // request the 32-bit variant, transform it here. 4812 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4813 isARMLowRegister(Inst.getOperand(1).getReg()) && 4814 Inst.getOperand(2).getImm() == ARMCC::AL && 4815 Inst.getOperand(4).getReg() == ARM::CPSR && 4816 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4817 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4818 // The operands aren't the same for tMOV[S]r... (no cc_out) 4819 MCInst TmpInst; 4820 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 4821 TmpInst.addOperand(Inst.getOperand(0)); 4822 TmpInst.addOperand(Inst.getOperand(1)); 4823 TmpInst.addOperand(Inst.getOperand(2)); 4824 TmpInst.addOperand(Inst.getOperand(3)); 4825 Inst = TmpInst; 4826 return true; 4827 } 4828 break; 4829 } 4830 case ARM::t2SXTH: 4831 case ARM::t2SXTB: 4832 case ARM::t2UXTH: 4833 case ARM::t2UXTB: { 4834 // If we can use the 16-bit encoding and the user didn't explicitly 4835 // request the 32-bit variant, transform it here. 
4836 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4837 isARMLowRegister(Inst.getOperand(1).getReg()) && 4838 Inst.getOperand(2).getImm() == 0 && 4839 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4840 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4841 unsigned NewOpc; 4842 switch (Inst.getOpcode()) { 4843 default: llvm_unreachable("Illegal opcode!"); 4844 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4845 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4846 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4847 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4848 } 4849 // The operands aren't the same for thumb1 (no rotate operand). 4850 MCInst TmpInst; 4851 TmpInst.setOpcode(NewOpc); 4852 TmpInst.addOperand(Inst.getOperand(0)); 4853 TmpInst.addOperand(Inst.getOperand(1)); 4854 TmpInst.addOperand(Inst.getOperand(3)); 4855 TmpInst.addOperand(Inst.getOperand(4)); 4856 Inst = TmpInst; 4857 return true; 4858 } 4859 break; 4860 } 4861 case ARM::t2IT: { 4862 // The mask bits for all but the first condition are represented as 4863 // the low bit of the condition code value implies 't'. We currently 4864 // always have 1 implies 't', so XOR toggle the bits if the low bit 4865 // of the condition code is zero. The encoding also expects the low 4866 // bit of the condition to be encoded as bit 4 of the mask operand, 4867 // so mask that in if needed 4868 MCOperand &MO = Inst.getOperand(1); 4869 unsigned Mask = MO.getImm(); 4870 unsigned OrigMask = Mask; 4871 unsigned TZ = CountTrailingZeros_32(Mask); 4872 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4873 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4874 for (unsigned i = 3; i != TZ; --i) 4875 Mask ^= 1 << i; 4876 } else 4877 Mask |= 0x10; 4878 MO.setImm(Mask); 4879 4880 // Set up the IT block state according to the IT instruction we just 4881 // matched. 
4882 assert(!inITBlock() && "nested IT blocks?!"); 4883 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4884 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4885 ITState.CurPosition = 0; 4886 ITState.FirstCond = true; 4887 break; 4888 } 4889 } 4890 return false; 4891} 4892 4893unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4894 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4895 // suffix depending on whether they're in an IT block or not. 4896 unsigned Opc = Inst.getOpcode(); 4897 const MCInstrDesc &MCID = getInstDesc(Opc); 4898 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4899 assert(MCID.hasOptionalDef() && 4900 "optionally flag setting instruction missing optional def operand"); 4901 assert(MCID.NumOperands == Inst.getNumOperands() && 4902 "operand count mismatch!"); 4903 // Find the optional-def operand (cc_out). 4904 unsigned OpNo; 4905 for (OpNo = 0; 4906 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4907 ++OpNo) 4908 ; 4909 // If we're parsing Thumb1, reject it completely. 4910 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4911 return Match_MnemonicFail; 4912 // If we're parsing Thumb2, which form is legal depends on whether we're 4913 // in an IT block. 4914 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4915 !inITBlock()) 4916 return Match_RequiresITBlock; 4917 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4918 inITBlock()) 4919 return Match_RequiresNotITBlock; 4920 } 4921 // Some high-register supporting Thumb1 encodings only allow both registers 4922 // to be from r0-r7 when in Thumb2. 4923 else if (Opc == ARM::tADDhirr && isThumbOne() && 4924 isARMLowRegister(Inst.getOperand(1).getReg()) && 4925 isARMLowRegister(Inst.getOperand(2).getReg())) 4926 return Match_RequiresThumb2; 4927 // Others only require ARMv6 or later. 
4928 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4929 isARMLowRegister(Inst.getOperand(0).getReg()) && 4930 isARMLowRegister(Inst.getOperand(1).getReg())) 4931 return Match_RequiresV6; 4932 return Match_Success; 4933} 4934 4935bool ARMAsmParser:: 4936MatchAndEmitInstruction(SMLoc IDLoc, 4937 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4938 MCStreamer &Out) { 4939 MCInst Inst; 4940 unsigned ErrorInfo; 4941 unsigned MatchResult; 4942 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4943 switch (MatchResult) { 4944 default: break; 4945 case Match_Success: 4946 // Context sensitive operand constraints aren't handled by the matcher, 4947 // so check them here. 4948 if (validateInstruction(Inst, Operands)) { 4949 // Still progress the IT block, otherwise one wrong condition causes 4950 // nasty cascading errors. 4951 forwardITPosition(); 4952 return true; 4953 } 4954 4955 // Some instructions need post-processing to, for example, tweak which 4956 // encoding is selected. Loop on it while changes happen so the 4957 // individual transformations can chain off each other. E.g., 4958 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 4959 while (processInstruction(Inst, Operands)) 4960 ; 4961 4962 // Only move forward at the very end so that everything in validate 4963 // and process gets a consistent answer about whether we're in an IT 4964 // block. 
4965 forwardITPosition(); 4966 4967 Out.EmitInstruction(Inst); 4968 return false; 4969 case Match_MissingFeature: 4970 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 4971 return true; 4972 case Match_InvalidOperand: { 4973 SMLoc ErrorLoc = IDLoc; 4974 if (ErrorInfo != ~0U) { 4975 if (ErrorInfo >= Operands.size()) 4976 return Error(IDLoc, "too few operands for instruction"); 4977 4978 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 4979 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 4980 } 4981 4982 return Error(ErrorLoc, "invalid operand for instruction"); 4983 } 4984 case Match_MnemonicFail: 4985 return Error(IDLoc, "invalid instruction"); 4986 case Match_ConversionFail: 4987 // The converter function will have already emited a diagnostic. 4988 return true; 4989 case Match_RequiresNotITBlock: 4990 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 4991 case Match_RequiresITBlock: 4992 return Error(IDLoc, "instruction only valid inside IT block"); 4993 case Match_RequiresV6: 4994 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 4995 case Match_RequiresThumb2: 4996 return Error(IDLoc, "instruction variant requires Thumb2"); 4997 } 4998 4999 llvm_unreachable("Implement any new match types added!"); 5000 return true; 5001} 5002 5003/// parseDirective parses the arm specific directives 5004bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 5005 StringRef IDVal = DirectiveID.getIdentifier(); 5006 if (IDVal == ".word") 5007 return parseDirectiveWord(4, DirectiveID.getLoc()); 5008 else if (IDVal == ".thumb") 5009 return parseDirectiveThumb(DirectiveID.getLoc()); 5010 else if (IDVal == ".thumb_func") 5011 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 5012 else if (IDVal == ".code") 5013 return parseDirectiveCode(DirectiveID.getLoc()); 5014 else if (IDVal == ".syntax") 5015 return parseDirectiveSyntax(DirectiveID.getLoc()); 5016 return true; 5017} 5018 5019/// parseDirectiveWord 
5020/// ::= .word [ expression (, expression)* ] 5021bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 5022 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5023 for (;;) { 5024 const MCExpr *Value; 5025 if (getParser().ParseExpression(Value)) 5026 return true; 5027 5028 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 5029 5030 if (getLexer().is(AsmToken::EndOfStatement)) 5031 break; 5032 5033 // FIXME: Improve diagnostic. 5034 if (getLexer().isNot(AsmToken::Comma)) 5035 return Error(L, "unexpected token in directive"); 5036 Parser.Lex(); 5037 } 5038 } 5039 5040 Parser.Lex(); 5041 return false; 5042} 5043 5044/// parseDirectiveThumb 5045/// ::= .thumb 5046bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 5047 if (getLexer().isNot(AsmToken::EndOfStatement)) 5048 return Error(L, "unexpected token in directive"); 5049 Parser.Lex(); 5050 5051 // TODO: set thumb mode 5052 // TODO: tell the MC streamer the mode 5053 // getParser().getStreamer().Emit???(); 5054 return false; 5055} 5056 5057/// parseDirectiveThumbFunc 5058/// ::= .thumbfunc symbol_name 5059bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 5060 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 5061 bool isMachO = MAI.hasSubsectionsViaSymbols(); 5062 StringRef Name; 5063 5064 // Darwin asm has function name after .thumb_func direction 5065 // ELF doesn't 5066 if (isMachO) { 5067 const AsmToken &Tok = Parser.getTok(); 5068 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 5069 return Error(L, "unexpected token in .thumb_func directive"); 5070 Name = Tok.getIdentifier(); 5071 Parser.Lex(); // Consume the identifier token. 
5072 } 5073 5074 if (getLexer().isNot(AsmToken::EndOfStatement)) 5075 return Error(L, "unexpected token in directive"); 5076 Parser.Lex(); 5077 5078 // FIXME: assuming function name will be the line following .thumb_func 5079 if (!isMachO) { 5080 Name = Parser.getTok().getIdentifier(); 5081 } 5082 5083 // Mark symbol as a thumb symbol. 5084 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 5085 getParser().getStreamer().EmitThumbFunc(Func); 5086 return false; 5087} 5088 5089/// parseDirectiveSyntax 5090/// ::= .syntax unified | divided 5091bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 5092 const AsmToken &Tok = Parser.getTok(); 5093 if (Tok.isNot(AsmToken::Identifier)) 5094 return Error(L, "unexpected token in .syntax directive"); 5095 StringRef Mode = Tok.getString(); 5096 if (Mode == "unified" || Mode == "UNIFIED") 5097 Parser.Lex(); 5098 else if (Mode == "divided" || Mode == "DIVIDED") 5099 return Error(L, "'.syntax divided' arm asssembly not supported"); 5100 else 5101 return Error(L, "unrecognized syntax mode in .syntax directive"); 5102 5103 if (getLexer().isNot(AsmToken::EndOfStatement)) 5104 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5105 Parser.Lex(); 5106 5107 // TODO tell the MC streamer the mode 5108 // getParser().getStreamer().Emit???(); 5109 return false; 5110} 5111 5112/// parseDirectiveCode 5113/// ::= .code 16 | 32 5114bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 5115 const AsmToken &Tok = Parser.getTok(); 5116 if (Tok.isNot(AsmToken::Integer)) 5117 return Error(L, "unexpected token in .code directive"); 5118 int64_t Val = Parser.getTok().getIntVal(); 5119 if (Val == 16) 5120 Parser.Lex(); 5121 else if (Val == 32) 5122 Parser.Lex(); 5123 else 5124 return Error(L, "invalid operand to .code directive"); 5125 5126 if (getLexer().isNot(AsmToken::EndOfStatement)) 5127 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5128 Parser.Lex(); 5129 5130 if (Val == 16) { 5131 if 
(!isThumb()) 5132 SwitchMode(); 5133 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5134 } else { 5135 if (isThumb()) 5136 SwitchMode(); 5137 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5138 } 5139 5140 return false; 5141} 5142 5143extern "C" void LLVMInitializeARMAsmLexer(); 5144 5145/// Force static initialization. 5146extern "C" void LLVMInitializeARMAsmParser() { 5147 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 5148 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 5149 LLVMInitializeARMAsmLexer(); 5150} 5151 5152#define GET_REGISTER_MATCHER 5153#define GET_MATCHER_IMPLEMENTATION 5154#include "ARMGenAsmMatcher.inc" 5155