ARMAsmParser.cpp revision 9b8f2a0b365ea62a5fef80bbaab3cf0252db2fcf
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. Parses
/// mnemonics and operands and hands them to the tablegen-generated matcher
/// (included via GET_ASSEMBLER_HEADER below) to build MCInsts.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // State for the Thumb IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block.  It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // An IT block is active iff CurPosition holds a real position (not ~0U).
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics are routed through the generic assembly parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Operand and directive parsing helpers. The tryParse* variants may fail
  // without emitting a diagnostic; the int-returning ones return -1 when no
  // register is present.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into the base mnemonic and its
  // predication / carry-set / IT-mask suffixes.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Flip between ARM and Thumb mode (for .code 16/32) and recompute the
  // matcher's available-feature bits.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked by the generated matcher for operand
  // classes that declare a ParserMethod.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  // PKH{BT,TB} shift immediates: lsl allows [0,31], asr allows [1,32].
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Instruction-level validation and post-processing hooks applied after a
  // successful match.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match-failure codes returned by
  // checkTargetMatchPredicate, beyond the generic Match_* results.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union payload below: identifies which operand
  // flavor this instance holds.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  // Register-list payload lives outside the union (SmallVector has a
  // non-trivial constructor). Used by the three k_*RegisterList kinds.
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;      // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;          // shift for OffsetReg.
      unsigned Alignment;         // 0 = no alignment specified
                                  // n = alignment in bytes (8, 16, or 32)
      unsigned isNegative : 1;    // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    // Register shifted by a register, e.g. "r0, lsl r1".
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    // Register shifted by an immediate, e.g. "r0, lsl #2".
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor: copies only the union member that is active for Kind
  // (plus Registers for the register-list kinds).
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Typed accessors. Each asserts the operand holds the matching kind.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Predicates used by the generated matcher to classify this operand
  // against each instruction's operand classes.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  // Immediate-range predicates. Unless noted, a non-constant expression
  // (one needing a fixup) is rejected.
  // Multiple of 4 in [-1020, 1020].
  bool isImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  // Multiple of 4 in [0, 1020].
  bool isImm0_1020s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  // Multiple of 4 in [0, 508].
  bool isImm0_508s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  // Thumb shift-right amount: [1, 32].
  bool isImmThumbSR() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  // ARM modified-immediate (8-bit value rotated by an even amount).
  bool isARMSOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  // Thumb2 modified-immediate.
  bool isT2SOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  // SETEND takes exactly 0 (LE) or 1 (BE).
  bool isSetEndImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (Kind != k_Immediate)
      return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes #-0 here as well.
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  // TBB: [Rn, Rm] with no shift, negation, or alignment.
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  // TBH: [Rn, Rm, lsl #1] only.
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // NOTE(review): the !Memory.OffsetRegNum in the condition below is
    // redundant — the guard above already rejected a zero OffsetRegNum.
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255]. INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 0;
  }
  bool isMemUImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095]. INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // INT32_MIN encodes #-0.
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Multiple of 4 in [-1020, 1020]; INT32_MIN encodes #-0.
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // Vector lane indices; the valid range narrows as the element widens.
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
  // add*Operands methods: append this operand's value(s) to Inst. N is the
  // number of MCOperands the matcher expects this operand to contribute.

  /// addExpr - Add Expr to Inst as an immediate when it folds to a constant,
  /// otherwise as a symbolic expression. A null Expr is added as imm 0.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    // AL (always) is unpredicated, so no CPSR read; anything else reads CPSR.
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register-shifted-register: source reg, shift reg, and an immediate
  // encoding the shift opcode (via getSORegOpc).
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register-shifted-immediate: source reg plus combined opcode+amount imm.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Bit 5 selects asr vs lsl; the low bits carry the shift amount.
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  // A register list contributes one MCOperand per register, in list order.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // The double shift left-then-right positions 'width' set bits at 'lsb',
    // which the final ~ then clears.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    // Relies on the isImm8s4() predicate having guaranteed a constant expr.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    // Relies on the isImm0_1020s4() predicate having guaranteed a constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
1054 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1055 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1056 } 1057 1058 void addImm0_255Operands(MCInst &Inst, unsigned N) const { 1059 assert(N == 1 && "Invalid number of operands!"); 1060 addExpr(Inst, getImm()); 1061 } 1062 1063 void addImm0_7Operands(MCInst &Inst, unsigned N) const { 1064 assert(N == 1 && "Invalid number of operands!"); 1065 addExpr(Inst, getImm()); 1066 } 1067 1068 void addImm0_15Operands(MCInst &Inst, unsigned N) const { 1069 assert(N == 1 && "Invalid number of operands!"); 1070 addExpr(Inst, getImm()); 1071 } 1072 1073 void addImm0_31Operands(MCInst &Inst, unsigned N) const { 1074 assert(N == 1 && "Invalid number of operands!"); 1075 addExpr(Inst, getImm()); 1076 } 1077 1078 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1079 assert(N == 1 && "Invalid number of operands!"); 1080 // The constant encodes as the immediate-1, and we store in the instruction 1081 // the bits as encoded, so subtract off one here. 1082 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1083 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1084 } 1085 1086 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1087 assert(N == 1 && "Invalid number of operands!"); 1088 // The constant encodes as the immediate-1, and we store in the instruction 1089 // the bits as encoded, so subtract off one here. 
1090 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1091 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1092 } 1093 1094 void addImm0_65535Operands(MCInst &Inst, unsigned N) const { 1095 assert(N == 1 && "Invalid number of operands!"); 1096 addExpr(Inst, getImm()); 1097 } 1098 1099 void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const { 1100 assert(N == 1 && "Invalid number of operands!"); 1101 addExpr(Inst, getImm()); 1102 } 1103 1104 void addImm24bitOperands(MCInst &Inst, unsigned N) const { 1105 assert(N == 1 && "Invalid number of operands!"); 1106 addExpr(Inst, getImm()); 1107 } 1108 1109 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1110 assert(N == 1 && "Invalid number of operands!"); 1111 // The constant encodes as the immediate, except for 32, which encodes as 1112 // zero. 1113 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1114 unsigned Imm = CE->getValue(); 1115 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1116 } 1117 1118 void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const { 1119 assert(N == 1 && "Invalid number of operands!"); 1120 addExpr(Inst, getImm()); 1121 } 1122 1123 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1124 assert(N == 1 && "Invalid number of operands!"); 1125 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1126 // the instruction as well. 1127 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1128 int Val = CE->getValue(); 1129 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 
0 : Val)); 1130 } 1131 1132 void addARMSOImmOperands(MCInst &Inst, unsigned N) const { 1133 assert(N == 1 && "Invalid number of operands!"); 1134 addExpr(Inst, getImm()); 1135 } 1136 1137 void addT2SOImmOperands(MCInst &Inst, unsigned N) const { 1138 assert(N == 1 && "Invalid number of operands!"); 1139 addExpr(Inst, getImm()); 1140 } 1141 1142 void addSetEndImmOperands(MCInst &Inst, unsigned N) const { 1143 assert(N == 1 && "Invalid number of operands!"); 1144 addExpr(Inst, getImm()); 1145 } 1146 1147 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1148 assert(N == 1 && "Invalid number of operands!"); 1149 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1150 } 1151 1152 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1153 assert(N == 1 && "Invalid number of operands!"); 1154 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1155 } 1156 1157 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1158 assert(N == 2 && "Invalid number of operands!"); 1159 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1160 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1161 } 1162 1163 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1164 assert(N == 3 && "Invalid number of operands!"); 1165 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1166 if (!Memory.OffsetRegNum) { 1167 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1168 // Special case for #-0 1169 if (Val == INT32_MIN) Val = 0; 1170 if (Val < 0) Val = -Val; 1171 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1172 } else { 1173 // For register offset, we encode the shift type and negation flag 1174 // here. 1175 Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1176 Memory.ShiftImm, Memory.ShiftType); 1177 } 1178 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1179 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1180 Inst.addOperand(MCOperand::CreateImm(Val)); 1181 } 1182 1183 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1184 assert(N == 2 && "Invalid number of operands!"); 1185 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1186 assert(CE && "non-constant AM2OffsetImm operand!"); 1187 int32_t Val = CE->getValue(); 1188 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1189 // Special case for #-0 1190 if (Val == INT32_MIN) Val = 0; 1191 if (Val < 0) Val = -Val; 1192 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1193 Inst.addOperand(MCOperand::CreateReg(0)); 1194 Inst.addOperand(MCOperand::CreateImm(Val)); 1195 } 1196 1197 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1198 assert(N == 3 && "Invalid number of operands!"); 1199 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1200 if (!Memory.OffsetRegNum) { 1201 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1202 // Special case for #-0 1203 if (Val == INT32_MIN) Val = 0; 1204 if (Val < 0) Val = -Val; 1205 Val = ARM_AM::getAM3Opc(AddSub, Val); 1206 } else { 1207 // For register offset, we encode the shift type and negation flag 1208 // here. 1209 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1210 } 1211 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1212 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1213 Inst.addOperand(MCOperand::CreateImm(Val)); 1214 } 1215 1216 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1217 assert(N == 2 && "Invalid number of operands!"); 1218 if (Kind == k_PostIndexRegister) { 1219 int32_t Val = 1220 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? 
ARM_AM::add : ARM_AM::sub, 0); 1221 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1222 Inst.addOperand(MCOperand::CreateImm(Val)); 1223 return; 1224 } 1225 1226 // Constant offset. 1227 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1228 int32_t Val = CE->getValue(); 1229 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1230 // Special case for #-0 1231 if (Val == INT32_MIN) Val = 0; 1232 if (Val < 0) Val = -Val; 1233 Val = ARM_AM::getAM3Opc(AddSub, Val); 1234 Inst.addOperand(MCOperand::CreateReg(0)); 1235 Inst.addOperand(MCOperand::CreateImm(Val)); 1236 } 1237 1238 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1239 assert(N == 2 && "Invalid number of operands!"); 1240 // The lower two bits are always zero and as such are not encoded. 1241 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1242 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1243 // Special case for #-0 1244 if (Val == INT32_MIN) Val = 0; 1245 if (Val < 0) Val = -Val; 1246 Val = ARM_AM::getAM5Opc(AddSub, Val); 1247 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1248 Inst.addOperand(MCOperand::CreateImm(Val)); 1249 } 1250 1251 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1252 assert(N == 2 && "Invalid number of operands!"); 1253 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1254 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1255 Inst.addOperand(MCOperand::CreateImm(Val)); 1256 } 1257 1258 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1259 assert(N == 2 && "Invalid number of operands!"); 1260 // The lower two bits are always zero and as such are not encoded. 1261 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1262 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1263 Inst.addOperand(MCOperand::CreateImm(Val)); 1264 } 1265 1266 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1267 assert(N == 2 && "Invalid number of operands!"); 1268 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1269 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1270 Inst.addOperand(MCOperand::CreateImm(Val)); 1271 } 1272 1273 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1274 addMemImm8OffsetOperands(Inst, N); 1275 } 1276 1277 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1278 addMemImm8OffsetOperands(Inst, N); 1279 } 1280 1281 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1282 assert(N == 2 && "Invalid number of operands!"); 1283 // If this is an immediate, it's a label reference. 1284 if (Kind == k_Immediate) { 1285 addExpr(Inst, getImm()); 1286 Inst.addOperand(MCOperand::CreateImm(0)); 1287 return; 1288 } 1289 1290 // Otherwise, it's a normal memory reg+offset. 1291 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1292 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1293 Inst.addOperand(MCOperand::CreateImm(Val)); 1294 } 1295 1296 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1297 assert(N == 2 && "Invalid number of operands!"); 1298 // If this is an immediate, it's a label reference. 1299 if (Kind == k_Immediate) { 1300 addExpr(Inst, getImm()); 1301 Inst.addOperand(MCOperand::CreateImm(0)); 1302 return; 1303 } 1304 1305 // Otherwise, it's a normal memory reg+offset. 1306 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1307 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1308 Inst.addOperand(MCOperand::CreateImm(Val)); 1309 } 1310 1311 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1312 assert(N == 2 && "Invalid number of operands!"); 1313 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1314 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1315 } 1316 1317 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1318 assert(N == 2 && "Invalid number of operands!"); 1319 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1320 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1321 } 1322 1323 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1324 assert(N == 3 && "Invalid number of operands!"); 1325 unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1326 Memory.ShiftImm, Memory.ShiftType); 1327 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1328 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1329 Inst.addOperand(MCOperand::CreateImm(Val)); 1330 } 1331 1332 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1333 assert(N == 3 && "Invalid number of operands!"); 1334 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1335 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1336 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1337 } 1338 1339 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1340 assert(N == 2 && "Invalid number of operands!"); 1341 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1342 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1343 } 1344 1345 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1346 assert(N == 2 && "Invalid number of operands!"); 1347 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1348 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1349 Inst.addOperand(MCOperand::CreateImm(Val)); 1350 } 1351 1352 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1353 assert(N == 2 && "Invalid number of operands!"); 1354 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1355 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1356 Inst.addOperand(MCOperand::CreateImm(Val)); 1357 } 1358 1359 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1360 assert(N == 2 && "Invalid number of operands!"); 1361 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1362 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1363 Inst.addOperand(MCOperand::CreateImm(Val)); 1364 } 1365 1366 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1367 assert(N == 2 && "Invalid number of operands!"); 1368 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1369 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1370 Inst.addOperand(MCOperand::CreateImm(Val)); 1371 } 1372 1373 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1374 assert(N == 1 && "Invalid number of operands!"); 1375 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1376 assert(CE && "non-constant post-idx-imm8 operand!"); 1377 int Imm = CE->getValue(); 1378 bool isAdd = Imm >= 0; 1379 if (Imm == INT32_MIN) Imm = 0; 1380 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1381 Inst.addOperand(MCOperand::CreateImm(Imm)); 1382 } 1383 1384 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1385 assert(N == 1 && "Invalid number of operands!"); 1386 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1387 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1388 int Imm = CE->getValue(); 1389 bool isAdd = Imm >= 0; 1390 if (Imm == INT32_MIN) Imm = 0; 1391 // Immediate is scaled by 4. 1392 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1393 Inst.addOperand(MCOperand::CreateImm(Imm)); 1394 } 1395 1396 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1397 assert(N == 2 && "Invalid number of operands!"); 1398 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1399 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1400 } 1401 1402 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1403 assert(N == 2 && "Invalid number of operands!"); 1404 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1405 // The sign, shift type, and shift amount are encoded in a single operand 1406 // using the AM2 encoding helpers. 1407 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1408 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1409 PostIdxReg.ShiftTy); 1410 Inst.addOperand(MCOperand::CreateImm(Imm)); 1411 } 1412 1413 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1414 assert(N == 1 && "Invalid number of operands!"); 1415 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1416 } 1417 1418 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1419 assert(N == 1 && "Invalid number of operands!"); 1420 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1421 } 1422 1423 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1424 assert(N == 1 && "Invalid number of operands!"); 1425 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1426 } 1427 1428 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1429 assert(N == 1 && "Invalid number of operands!"); 1430 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1431 } 1432 1433 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1434 assert(N == 1 && "Invalid number of operands!"); 1435 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1436 } 1437 1438 virtual void print(raw_ostream &OS) const; 1439 1440 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1441 
ARMOperand *Op = new ARMOperand(k_ITCondMask); 1442 Op->ITMask.Mask = Mask; 1443 Op->StartLoc = S; 1444 Op->EndLoc = S; 1445 return Op; 1446 } 1447 1448 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1449 ARMOperand *Op = new ARMOperand(k_CondCode); 1450 Op->CC.Val = CC; 1451 Op->StartLoc = S; 1452 Op->EndLoc = S; 1453 return Op; 1454 } 1455 1456 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1457 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1458 Op->Cop.Val = CopVal; 1459 Op->StartLoc = S; 1460 Op->EndLoc = S; 1461 return Op; 1462 } 1463 1464 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1465 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1466 Op->Cop.Val = CopVal; 1467 Op->StartLoc = S; 1468 Op->EndLoc = S; 1469 return Op; 1470 } 1471 1472 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1473 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1474 Op->Cop.Val = Val; 1475 Op->StartLoc = S; 1476 Op->EndLoc = E; 1477 return Op; 1478 } 1479 1480 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 1481 ARMOperand *Op = new ARMOperand(k_CCOut); 1482 Op->Reg.RegNum = RegNum; 1483 Op->StartLoc = S; 1484 Op->EndLoc = S; 1485 return Op; 1486 } 1487 1488 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 1489 ARMOperand *Op = new ARMOperand(k_Token); 1490 Op->Tok.Data = Str.data(); 1491 Op->Tok.Length = Str.size(); 1492 Op->StartLoc = S; 1493 Op->EndLoc = S; 1494 return Op; 1495 } 1496 1497 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 1498 ARMOperand *Op = new ARMOperand(k_Register); 1499 Op->Reg.RegNum = RegNum; 1500 Op->StartLoc = S; 1501 Op->EndLoc = E; 1502 return Op; 1503 } 1504 1505 static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, 1506 unsigned SrcReg, 1507 unsigned ShiftReg, 1508 unsigned ShiftImm, 1509 SMLoc S, SMLoc E) { 1510 ARMOperand *Op = new ARMOperand(k_ShiftedRegister); 1511 Op->RegShiftedReg.ShiftTy = ShTy; 1512 Op->RegShiftedReg.SrcReg 
= SrcReg; 1513 Op->RegShiftedReg.ShiftReg = ShiftReg; 1514 Op->RegShiftedReg.ShiftImm = ShiftImm; 1515 Op->StartLoc = S; 1516 Op->EndLoc = E; 1517 return Op; 1518 } 1519 1520 static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, 1521 unsigned SrcReg, 1522 unsigned ShiftImm, 1523 SMLoc S, SMLoc E) { 1524 ARMOperand *Op = new ARMOperand(k_ShiftedImmediate); 1525 Op->RegShiftedImm.ShiftTy = ShTy; 1526 Op->RegShiftedImm.SrcReg = SrcReg; 1527 Op->RegShiftedImm.ShiftImm = ShiftImm; 1528 Op->StartLoc = S; 1529 Op->EndLoc = E; 1530 return Op; 1531 } 1532 1533 static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm, 1534 SMLoc S, SMLoc E) { 1535 ARMOperand *Op = new ARMOperand(k_ShifterImmediate); 1536 Op->ShifterImm.isASR = isASR; 1537 Op->ShifterImm.Imm = Imm; 1538 Op->StartLoc = S; 1539 Op->EndLoc = E; 1540 return Op; 1541 } 1542 1543 static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) { 1544 ARMOperand *Op = new ARMOperand(k_RotateImmediate); 1545 Op->RotImm.Imm = Imm; 1546 Op->StartLoc = S; 1547 Op->EndLoc = E; 1548 return Op; 1549 } 1550 1551 static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width, 1552 SMLoc S, SMLoc E) { 1553 ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor); 1554 Op->Bitfield.LSB = LSB; 1555 Op->Bitfield.Width = Width; 1556 Op->StartLoc = S; 1557 Op->EndLoc = E; 1558 return Op; 1559 } 1560 1561 static ARMOperand * 1562 CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs, 1563 SMLoc StartLoc, SMLoc EndLoc) { 1564 KindTy Kind = k_RegisterList; 1565 1566 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first)) 1567 Kind = k_DPRRegisterList; 1568 else if (ARMMCRegisterClasses[ARM::SPRRegClassID]. 
1569 contains(Regs.front().first)) 1570 Kind = k_SPRRegisterList; 1571 1572 ARMOperand *Op = new ARMOperand(Kind); 1573 for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator 1574 I = Regs.begin(), E = Regs.end(); I != E; ++I) 1575 Op->Registers.push_back(I->first); 1576 array_pod_sort(Op->Registers.begin(), Op->Registers.end()); 1577 Op->StartLoc = StartLoc; 1578 Op->EndLoc = EndLoc; 1579 return Op; 1580 } 1581 1582 static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, 1583 MCContext &Ctx) { 1584 ARMOperand *Op = new ARMOperand(k_VectorIndex); 1585 Op->VectorIndex.Val = Idx; 1586 Op->StartLoc = S; 1587 Op->EndLoc = E; 1588 return Op; 1589 } 1590 1591 static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { 1592 ARMOperand *Op = new ARMOperand(k_Immediate); 1593 Op->Imm.Val = Val; 1594 Op->StartLoc = S; 1595 Op->EndLoc = E; 1596 return Op; 1597 } 1598 1599 static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) { 1600 ARMOperand *Op = new ARMOperand(k_FPImmediate); 1601 Op->FPImm.Val = Val; 1602 Op->StartLoc = S; 1603 Op->EndLoc = S; 1604 return Op; 1605 } 1606 1607 static ARMOperand *CreateMem(unsigned BaseRegNum, 1608 const MCConstantExpr *OffsetImm, 1609 unsigned OffsetRegNum, 1610 ARM_AM::ShiftOpc ShiftType, 1611 unsigned ShiftImm, 1612 unsigned Alignment, 1613 bool isNegative, 1614 SMLoc S, SMLoc E) { 1615 ARMOperand *Op = new ARMOperand(k_Memory); 1616 Op->Memory.BaseRegNum = BaseRegNum; 1617 Op->Memory.OffsetImm = OffsetImm; 1618 Op->Memory.OffsetRegNum = OffsetRegNum; 1619 Op->Memory.ShiftType = ShiftType; 1620 Op->Memory.ShiftImm = ShiftImm; 1621 Op->Memory.Alignment = Alignment; 1622 Op->Memory.isNegative = isNegative; 1623 Op->StartLoc = S; 1624 Op->EndLoc = E; 1625 return Op; 1626 } 1627 1628 static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd, 1629 ARM_AM::ShiftOpc ShiftTy, 1630 unsigned ShiftImm, 1631 SMLoc S, SMLoc E) { 1632 ARMOperand *Op = new ARMOperand(k_PostIndexRegister); 1633 
Op->PostIdxReg.RegNum = RegNum; 1634 Op->PostIdxReg.isAdd = isAdd; 1635 Op->PostIdxReg.ShiftTy = ShiftTy; 1636 Op->PostIdxReg.ShiftImm = ShiftImm; 1637 Op->StartLoc = S; 1638 Op->EndLoc = E; 1639 return Op; 1640 } 1641 1642 static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) { 1643 ARMOperand *Op = new ARMOperand(k_MemBarrierOpt); 1644 Op->MBOpt.Val = Opt; 1645 Op->StartLoc = S; 1646 Op->EndLoc = S; 1647 return Op; 1648 } 1649 1650 static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) { 1651 ARMOperand *Op = new ARMOperand(k_ProcIFlags); 1652 Op->IFlags.Val = IFlags; 1653 Op->StartLoc = S; 1654 Op->EndLoc = S; 1655 return Op; 1656 } 1657 1658 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) { 1659 ARMOperand *Op = new ARMOperand(k_MSRMask); 1660 Op->MMask.Val = MMask; 1661 Op->StartLoc = S; 1662 Op->EndLoc = S; 1663 return Op; 1664 } 1665}; 1666 1667} // end anonymous namespace. 1668 1669void ARMOperand::print(raw_ostream &OS) const { 1670 switch (Kind) { 1671 case k_FPImmediate: 1672 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1673 << ") >"; 1674 break; 1675 case k_CondCode: 1676 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1677 break; 1678 case k_CCOut: 1679 OS << "<ccout " << getReg() << ">"; 1680 break; 1681 case k_ITCondMask: { 1682 static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)", 1683 "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)", 1684 "(tee)", "(eee)" }; 1685 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1686 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1687 break; 1688 } 1689 case k_CoprocNum: 1690 OS << "<coprocessor number: " << getCoproc() << ">"; 1691 break; 1692 case k_CoprocReg: 1693 OS << "<coprocessor register: " << getCoproc() << ">"; 1694 break; 1695 case k_CoprocOption: 1696 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1697 break; 1698 case k_MSRMask: 1699 OS << "<mask: " << getMSRMask() << ">"; 1700 
break; 1701 case k_Immediate: 1702 getImm()->print(OS); 1703 break; 1704 case k_MemBarrierOpt: 1705 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1706 break; 1707 case k_Memory: 1708 OS << "<memory " 1709 << " base:" << Memory.BaseRegNum; 1710 OS << ">"; 1711 break; 1712 case k_PostIndexRegister: 1713 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-") 1714 << PostIdxReg.RegNum; 1715 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1716 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1717 << PostIdxReg.ShiftImm; 1718 OS << ">"; 1719 break; 1720 case k_ProcIFlags: { 1721 OS << "<ARM_PROC::"; 1722 unsigned IFlags = getProcIFlags(); 1723 for (int i=2; i >= 0; --i) 1724 if (IFlags & (1 << i)) 1725 OS << ARM_PROC::IFlagsToString(1 << i); 1726 OS << ">"; 1727 break; 1728 } 1729 case k_Register: 1730 OS << "<register " << getReg() << ">"; 1731 break; 1732 case k_ShifterImmediate: 1733 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 1734 << " #" << ShifterImm.Imm << ">"; 1735 break; 1736 case k_ShiftedRegister: 1737 OS << "<so_reg_reg " 1738 << RegShiftedReg.SrcReg 1739 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1740 << ", " << RegShiftedReg.ShiftReg << ", " 1741 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1742 << ">"; 1743 break; 1744 case k_ShiftedImmediate: 1745 OS << "<so_reg_imm " 1746 << RegShiftedImm.SrcReg 1747 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1748 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1749 << ">"; 1750 break; 1751 case k_RotateImmediate: 1752 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1753 break; 1754 case k_BitfieldDescriptor: 1755 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1756 << ", width: " << Bitfield.Width << ">"; 1757 break; 1758 case k_RegisterList: 1759 case k_DPRRegisterList: 1760 case k_SPRRegisterList: { 1761 OS << "<register_list "; 1762 1763 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1764 for 
(SmallVectorImpl<unsigned>::const_iterator
       I = RegList.begin(), E = RegList.end(); I != E; ) {
    OS << *I;
    // Comma-separate; only print the separator between elements.
    if (++I < E) OS << ", ";
  }

  OS << ">";
    break;
  }
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// ParseRegister - MCTargetAsmParser hook. Returns true on failure.
/// NOTE(review): StartLoc/EndLoc are never written by this implementation —
/// confirm callers don't rely on them being set.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  // tryParseRegister() returns -1 when the token is not a register.
  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Accept the aliases the tablegen'erated matcher doesn't know about:
    // architectural numbers for sp/lr/pc and "ip" for r12.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

// NOTE(review): dead code — it references Operands/MatchOperand_* which are
// not in scope in this function; vector-index parsing lives in
// tryParseRegisterWithWriteBack instead.
#if 0
  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    SMLoc ExprLoc = Parser.getTok().getLoc();
    if (getParser().ParseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }
#endif

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  // NOTE(review): Error() returns true, so this path returns 1 ("recoverable")
  // even though the operator token was consumed and the previous operand was
  // popped and destroyed — confirm this shouldn't be -1.
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // rrx is represented as a shifted-immediate with amount 0 (second branch).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::
tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int RegNo = tryParseRegister();
  // Not a register: report "not parsed" to the caller.
  if (RegNo == -1)
    return true;

  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));

  // A trailing '!' is the writeback marker; it becomes its own token operand.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    SMLoc ExprLoc = Parser.getTok().getLoc();
    // NOTE(review): the returns below hand an OperandMatchResultTy enumerator
    // back from a bool function; ParseFail is nonzero so it converts to true
    // (failure), but the mixing looks unintentional — confirm.
    if (getParser().ParseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }

  return false;
}

/// MatchCoprocessorOperandName - Try to parse an coprocessor related
/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
/// "c5", ...  Returns the 0-15 coprocessor/register number, or -1 if the
/// name doesn't match "<CoprocOp>0".."<CoprocOp>15".
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
  // Use the same layout as the tablegen'erated register name matcher. Ugly,
  // but efficient.
  switch (Name.size()) {
  default: break;
  case 2:
    // Single digit: p0..p9 / c0..c9.
    if (Name[0] != CoprocOp)
      return -1;
    switch (Name[1]) {
    default:  return -1;
    case '0': return 0;
    case '1': return 1;
    case '2': return 2;
    case '3': return 3;
    case '4': return 4;
    case '5': return 5;
    case '6': return 6;
    case '7': return 7;
    case '8': return 8;
    case '9': return 9;
    }
    break;
  case 3:
    // Two digits: p10..p15 / c10..c15 (first digit must be '1').
    if (Name[0] != CoprocOp || Name[1] != '1')
      return -1;
    switch (Name[2]) {
    default:  return -1;
    case '0': return 10;
    case '1': return 11;
    case '2': return 12;
    case '3': return 13;
    case '4': return 14;
    case '5': return 15;
    }
    break;
  }

  return -1;
}

/// parseITCondCode - Try to parse a condition code for an IT instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  // Accept both canonical names and the hs/cs, lo/cc aliases.
  unsigned CC = StringSwitch<unsigned>(Tok.getString())
    .Case("eq", ARMCC::EQ)
    .Case("ne", ARMCC::NE)
    .Case("hs", ARMCC::HS)
    .Case("cs", ARMCC::HS)
    .Case("lo", ARMCC::LO)
    .Case("cc", ARMCC::LO)
    .Case("mi", ARMCC::MI)
    .Case("pl", ARMCC::PL)
    .Case("vs", ARMCC::VS)
    .Case("vc", ARMCC::VC)
    .Case("hi", ARMCC::HI)
    .Case("ls", ARMCC::LS)
    .Case("ge", ARMCC::GE)
    .Case("lt", ARMCC::LT)
    .Case("gt", ARMCC::GT)
    .Case("le", ARMCC::LE)
    .Case("al", ARMCC::AL)
    .Default(~0U);
  if (CC == ~0U)
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the token.

  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));

  return MatchOperand_Success;
}

/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}

/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}

/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2119/// coproc_option : '{' imm0_255 '}' 2120ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2121parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2122 SMLoc S = Parser.getTok().getLoc(); 2123 2124 // If this isn't a '{', this isn't a coprocessor immediate operand. 2125 if (Parser.getTok().isNot(AsmToken::LCurly)) 2126 return MatchOperand_NoMatch; 2127 Parser.Lex(); // Eat the '{' 2128 2129 const MCExpr *Expr; 2130 SMLoc Loc = Parser.getTok().getLoc(); 2131 if (getParser().ParseExpression(Expr)) { 2132 Error(Loc, "illegal expression"); 2133 return MatchOperand_ParseFail; 2134 } 2135 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2136 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2137 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2138 return MatchOperand_ParseFail; 2139 } 2140 int Val = CE->getValue(); 2141 2142 // Check for and consume the closing '}' 2143 if (Parser.getTok().isNot(AsmToken::RCurly)) 2144 return MatchOperand_ParseFail; 2145 SMLoc E = Parser.getTok().getLoc(); 2146 Parser.Lex(); // Eat the '}' 2147 2148 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2149 return MatchOperand_Success; 2150} 2151 2152// For register list parsing, we need to map from raw GPR register numbering 2153// to the enumeration values. The enumeration values aren't sorted by 2154// register number due to our using "sp", "lr" and "pc" as canonical names. 2155static unsigned getNextRegister(unsigned Reg) { 2156 // If this is a GPR, we need to do it manually, otherwise we can rely 2157 // on the sort ordering of the enumeration since the other reg-classes 2158 // are sane. 
2159 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2160 return Reg + 1; 2161 switch(Reg) { 2162 default: assert(0 && "Invalid GPR number!"); 2163 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2164 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2165 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2166 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2167 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2168 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2169 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2170 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2171 } 2172} 2173 2174/// Parse a register list. 2175bool ARMAsmParser:: 2176parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2177 assert(Parser.getTok().is(AsmToken::LCurly) && 2178 "Token is not a Left Curly Brace"); 2179 SMLoc S = Parser.getTok().getLoc(); 2180 Parser.Lex(); // Eat '{' token. 2181 SMLoc RegLoc = Parser.getTok().getLoc(); 2182 2183 // Check the first register in the list to see what register class 2184 // this is a list of. 2185 int Reg = tryParseRegister(); 2186 if (Reg == -1) 2187 return Error(RegLoc, "register expected"); 2188 2189 MCRegisterClass *RC; 2190 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2191 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2192 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2193 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2194 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2195 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2196 else 2197 return Error(RegLoc, "invalid register in register list"); 2198 2199 // The reglist instructions have at most 16 registers, so reserve 2200 // space for that many. 2201 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2202 // Store the first register. 
2203 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2204 2205 // This starts immediately after the first register token in the list, 2206 // so we can see either a comma or a minus (range separator) as a legal 2207 // next token. 2208 while (Parser.getTok().is(AsmToken::Comma) || 2209 Parser.getTok().is(AsmToken::Minus)) { 2210 if (Parser.getTok().is(AsmToken::Minus)) { 2211 Parser.Lex(); // Eat the comma. 2212 SMLoc EndLoc = Parser.getTok().getLoc(); 2213 int EndReg = tryParseRegister(); 2214 if (EndReg == -1) 2215 return Error(EndLoc, "register expected"); 2216 // If the register is the same as the start reg, there's nothing 2217 // more to do. 2218 if (Reg == EndReg) 2219 continue; 2220 // The register must be in the same register class as the first. 2221 if (!RC->contains(EndReg)) 2222 return Error(EndLoc, "invalid register in register list"); 2223 // Ranges must go from low to high. 2224 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2225 return Error(EndLoc, "bad range in register list"); 2226 2227 // Add all the registers in the range to the register list. 2228 while (Reg != EndReg) { 2229 Reg = getNextRegister(Reg); 2230 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2231 } 2232 continue; 2233 } 2234 Parser.Lex(); // Eat the comma. 2235 RegLoc = Parser.getTok().getLoc(); 2236 int OldReg = Reg; 2237 Reg = tryParseRegister(); 2238 if (Reg == -1) 2239 return Error(RegLoc, "register expected"); 2240 // The register must be in the same register class as the first. 2241 if (!RC->contains(Reg)) 2242 return Error(RegLoc, "invalid register in register list"); 2243 // List must be monotonically increasing. 2244 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2245 return Error(RegLoc, "register list not in ascending order"); 2246 // VFP register lists must also be contiguous. 
2247 // It's OK to use the enumeration values directly here rather, as the 2248 // VFP register classes have the enum sorted properly. 2249 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2250 Reg != OldReg + 1) 2251 return Error(RegLoc, "non-contiguous register range"); 2252 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2253 } 2254 2255 SMLoc E = Parser.getTok().getLoc(); 2256 if (Parser.getTok().isNot(AsmToken::RCurly)) 2257 return Error(E, "'}' expected"); 2258 Parser.Lex(); // Eat '}' token. 2259 2260 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2261 return false; 2262} 2263 2264/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2265ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2266parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2267 SMLoc S = Parser.getTok().getLoc(); 2268 const AsmToken &Tok = Parser.getTok(); 2269 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2270 StringRef OptStr = Tok.getString(); 2271 2272 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2273 .Case("sy", ARM_MB::SY) 2274 .Case("st", ARM_MB::ST) 2275 .Case("sh", ARM_MB::ISH) 2276 .Case("ish", ARM_MB::ISH) 2277 .Case("shst", ARM_MB::ISHST) 2278 .Case("ishst", ARM_MB::ISHST) 2279 .Case("nsh", ARM_MB::NSH) 2280 .Case("un", ARM_MB::NSH) 2281 .Case("nshst", ARM_MB::NSHST) 2282 .Case("unst", ARM_MB::NSHST) 2283 .Case("osh", ARM_MB::OSH) 2284 .Case("oshst", ARM_MB::OSHST) 2285 .Default(~0U); 2286 2287 if (Opt == ~0U) 2288 return MatchOperand_NoMatch; 2289 2290 Parser.Lex(); // Eat identifier token. 2291 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2292 return MatchOperand_Success; 2293} 2294 2295/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 
2296ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2297parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2298 SMLoc S = Parser.getTok().getLoc(); 2299 const AsmToken &Tok = Parser.getTok(); 2300 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2301 StringRef IFlagsStr = Tok.getString(); 2302 2303 // An iflags string of "none" is interpreted to mean that none of the AIF 2304 // bits are set. Not a terribly useful instruction, but a valid encoding. 2305 unsigned IFlags = 0; 2306 if (IFlagsStr != "none") { 2307 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2308 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2309 .Case("a", ARM_PROC::A) 2310 .Case("i", ARM_PROC::I) 2311 .Case("f", ARM_PROC::F) 2312 .Default(~0U); 2313 2314 // If some specific iflag is already set, it means that some letter is 2315 // present more than once, this is not acceptable. 2316 if (Flag == ~0U || (IFlags & Flag)) 2317 return MatchOperand_NoMatch; 2318 2319 IFlags |= Flag; 2320 } 2321 } 2322 2323 Parser.Lex(); // Eat identifier token. 2324 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2325 return MatchOperand_Success; 2326} 2327 2328/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2329ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2330parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2331 SMLoc S = Parser.getTok().getLoc(); 2332 const AsmToken &Tok = Parser.getTok(); 2333 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2334 StringRef Mask = Tok.getString(); 2335 2336 if (isMClass()) { 2337 // See ARMv6-M 10.1.1 2338 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2339 .Case("apsr", 0) 2340 .Case("iapsr", 1) 2341 .Case("eapsr", 2) 2342 .Case("xpsr", 3) 2343 .Case("ipsr", 5) 2344 .Case("epsr", 6) 2345 .Case("iepsr", 7) 2346 .Case("msp", 8) 2347 .Case("psp", 9) 2348 .Case("primask", 16) 2349 .Case("basepri", 17) 2350 .Case("basepri_max", 18) 2351 .Case("faultmask", 19) 2352 .Case("control", 20) 2353 .Default(~0U); 2354 2355 if (FlagsVal == ~0U) 2356 return MatchOperand_NoMatch; 2357 2358 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2359 // basepri, basepri_max and faultmask only valid for V7m. 2360 return MatchOperand_NoMatch; 2361 2362 Parser.Lex(); // Eat identifier token. 
2363 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2364 return MatchOperand_Success; 2365 } 2366 2367 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2368 size_t Start = 0, Next = Mask.find('_'); 2369 StringRef Flags = ""; 2370 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2371 if (Next != StringRef::npos) 2372 Flags = Mask.slice(Next+1, Mask.size()); 2373 2374 // FlagsVal contains the complete mask: 2375 // 3-0: Mask 2376 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2377 unsigned FlagsVal = 0; 2378 2379 if (SpecReg == "apsr") { 2380 FlagsVal = StringSwitch<unsigned>(Flags) 2381 .Case("nzcvq", 0x8) // same as CPSR_f 2382 .Case("g", 0x4) // same as CPSR_s 2383 .Case("nzcvqg", 0xc) // same as CPSR_fs 2384 .Default(~0U); 2385 2386 if (FlagsVal == ~0U) { 2387 if (!Flags.empty()) 2388 return MatchOperand_NoMatch; 2389 else 2390 FlagsVal = 8; // No flag 2391 } 2392 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2393 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2394 Flags = "fc"; 2395 for (int i = 0, e = Flags.size(); i != e; ++i) { 2396 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2397 .Case("c", 1) 2398 .Case("x", 2) 2399 .Case("s", 4) 2400 .Case("f", 8) 2401 .Default(~0U); 2402 2403 // If some specific flag is already set, it means that some letter is 2404 // present more than once, this is not acceptable. 2405 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2406 return MatchOperand_NoMatch; 2407 FlagsVal |= Flag; 2408 } 2409 } else // No match for special register. 2410 return MatchOperand_NoMatch; 2411 2412 // Special register without flags are equivalent to "fc" flags. 2413 if (!FlagsVal) 2414 FlagsVal = 0x9; 2415 2416 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2417 if (SpecReg == "spsr") 2418 FlagsVal |= 16; 2419 2420 Parser.Lex(); // Eat identifier token. 
2421 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2422 return MatchOperand_Success; 2423} 2424 2425ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2426parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2427 int Low, int High) { 2428 const AsmToken &Tok = Parser.getTok(); 2429 if (Tok.isNot(AsmToken::Identifier)) { 2430 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2431 return MatchOperand_ParseFail; 2432 } 2433 StringRef ShiftName = Tok.getString(); 2434 std::string LowerOp = LowercaseString(Op); 2435 std::string UpperOp = UppercaseString(Op); 2436 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2437 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2438 return MatchOperand_ParseFail; 2439 } 2440 Parser.Lex(); // Eat shift type token. 2441 2442 // There must be a '#' and a shift amount. 2443 if (Parser.getTok().isNot(AsmToken::Hash)) { 2444 Error(Parser.getTok().getLoc(), "'#' expected"); 2445 return MatchOperand_ParseFail; 2446 } 2447 Parser.Lex(); // Eat hash token. 
2448 2449 const MCExpr *ShiftAmount; 2450 SMLoc Loc = Parser.getTok().getLoc(); 2451 if (getParser().ParseExpression(ShiftAmount)) { 2452 Error(Loc, "illegal expression"); 2453 return MatchOperand_ParseFail; 2454 } 2455 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2456 if (!CE) { 2457 Error(Loc, "constant expression expected"); 2458 return MatchOperand_ParseFail; 2459 } 2460 int Val = CE->getValue(); 2461 if (Val < Low || Val > High) { 2462 Error(Loc, "immediate value out of range"); 2463 return MatchOperand_ParseFail; 2464 } 2465 2466 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2467 2468 return MatchOperand_Success; 2469} 2470 2471ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2472parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2473 const AsmToken &Tok = Parser.getTok(); 2474 SMLoc S = Tok.getLoc(); 2475 if (Tok.isNot(AsmToken::Identifier)) { 2476 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2477 return MatchOperand_ParseFail; 2478 } 2479 int Val = StringSwitch<int>(Tok.getString()) 2480 .Case("be", 1) 2481 .Case("le", 0) 2482 .Default(-1); 2483 Parser.Lex(); // Eat the token. 2484 2485 if (Val == -1) { 2486 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2487 return MatchOperand_ParseFail; 2488 } 2489 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2490 getContext()), 2491 S, Parser.getTok().getLoc())); 2492 return MatchOperand_Success; 2493} 2494 2495/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2496/// instructions. Legal values are: 2497/// lsl #n 'n' in [0,31] 2498/// asr #n 'n' in [1,32] 2499/// n == 32 encoded as n == 0. 
2500ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2501parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2502 const AsmToken &Tok = Parser.getTok(); 2503 SMLoc S = Tok.getLoc(); 2504 if (Tok.isNot(AsmToken::Identifier)) { 2505 Error(S, "shift operator 'asr' or 'lsl' expected"); 2506 return MatchOperand_ParseFail; 2507 } 2508 StringRef ShiftName = Tok.getString(); 2509 bool isASR; 2510 if (ShiftName == "lsl" || ShiftName == "LSL") 2511 isASR = false; 2512 else if (ShiftName == "asr" || ShiftName == "ASR") 2513 isASR = true; 2514 else { 2515 Error(S, "shift operator 'asr' or 'lsl' expected"); 2516 return MatchOperand_ParseFail; 2517 } 2518 Parser.Lex(); // Eat the operator. 2519 2520 // A '#' and a shift amount. 2521 if (Parser.getTok().isNot(AsmToken::Hash)) { 2522 Error(Parser.getTok().getLoc(), "'#' expected"); 2523 return MatchOperand_ParseFail; 2524 } 2525 Parser.Lex(); // Eat hash token. 2526 2527 const MCExpr *ShiftAmount; 2528 SMLoc E = Parser.getTok().getLoc(); 2529 if (getParser().ParseExpression(ShiftAmount)) { 2530 Error(E, "malformed shift expression"); 2531 return MatchOperand_ParseFail; 2532 } 2533 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2534 if (!CE) { 2535 Error(E, "shift amount must be an immediate"); 2536 return MatchOperand_ParseFail; 2537 } 2538 2539 int64_t Val = CE->getValue(); 2540 if (isASR) { 2541 // Shift amount must be in [1,32] 2542 if (Val < 1 || Val > 32) { 2543 Error(E, "'asr' shift amount must be in range [1,32]"); 2544 return MatchOperand_ParseFail; 2545 } 2546 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2547 if (isThumb() && Val == 32) { 2548 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2549 return MatchOperand_ParseFail; 2550 } 2551 if (Val == 32) Val = 0; 2552 } else { 2553 // Shift amount must be in [1,32] 2554 if (Val < 0 || Val > 31) { 2555 Error(E, "'lsr' shift amount must be in range [0,31]"); 2556 return MatchOperand_ParseFail; 2557 } 2558 } 2559 2560 E = Parser.getTok().getLoc(); 2561 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2562 2563 return MatchOperand_Success; 2564} 2565 2566/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2567/// of instructions. Legal values are: 2568/// ror #n 'n' in {0, 8, 16, 24} 2569ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2570parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2571 const AsmToken &Tok = Parser.getTok(); 2572 SMLoc S = Tok.getLoc(); 2573 if (Tok.isNot(AsmToken::Identifier)) 2574 return MatchOperand_NoMatch; 2575 StringRef ShiftName = Tok.getString(); 2576 if (ShiftName != "ror" && ShiftName != "ROR") 2577 return MatchOperand_NoMatch; 2578 Parser.Lex(); // Eat the operator. 2579 2580 // A '#' and a rotate amount. 2581 if (Parser.getTok().isNot(AsmToken::Hash)) { 2582 Error(Parser.getTok().getLoc(), "'#' expected"); 2583 return MatchOperand_ParseFail; 2584 } 2585 Parser.Lex(); // Eat hash token. 2586 2587 const MCExpr *ShiftAmount; 2588 SMLoc E = Parser.getTok().getLoc(); 2589 if (getParser().ParseExpression(ShiftAmount)) { 2590 Error(E, "malformed rotate expression"); 2591 return MatchOperand_ParseFail; 2592 } 2593 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2594 if (!CE) { 2595 Error(E, "rotate amount must be an immediate"); 2596 return MatchOperand_ParseFail; 2597 } 2598 2599 int64_t Val = CE->getValue(); 2600 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2601 // normally, zero is represented in asm by omitting the rotate operand 2602 // entirely. 
2603 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2604 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2605 return MatchOperand_ParseFail; 2606 } 2607 2608 E = Parser.getTok().getLoc(); 2609 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2610 2611 return MatchOperand_Success; 2612} 2613 2614ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2615parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2616 SMLoc S = Parser.getTok().getLoc(); 2617 // The bitfield descriptor is really two operands, the LSB and the width. 2618 if (Parser.getTok().isNot(AsmToken::Hash)) { 2619 Error(Parser.getTok().getLoc(), "'#' expected"); 2620 return MatchOperand_ParseFail; 2621 } 2622 Parser.Lex(); // Eat hash token. 2623 2624 const MCExpr *LSBExpr; 2625 SMLoc E = Parser.getTok().getLoc(); 2626 if (getParser().ParseExpression(LSBExpr)) { 2627 Error(E, "malformed immediate expression"); 2628 return MatchOperand_ParseFail; 2629 } 2630 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2631 if (!CE) { 2632 Error(E, "'lsb' operand must be an immediate"); 2633 return MatchOperand_ParseFail; 2634 } 2635 2636 int64_t LSB = CE->getValue(); 2637 // The LSB must be in the range [0,31] 2638 if (LSB < 0 || LSB > 31) { 2639 Error(E, "'lsb' operand must be in the range [0,31]"); 2640 return MatchOperand_ParseFail; 2641 } 2642 E = Parser.getTok().getLoc(); 2643 2644 // Expect another immediate operand. 2645 if (Parser.getTok().isNot(AsmToken::Comma)) { 2646 Error(Parser.getTok().getLoc(), "too few operands"); 2647 return MatchOperand_ParseFail; 2648 } 2649 Parser.Lex(); // Eat hash token. 2650 if (Parser.getTok().isNot(AsmToken::Hash)) { 2651 Error(Parser.getTok().getLoc(), "'#' expected"); 2652 return MatchOperand_ParseFail; 2653 } 2654 Parser.Lex(); // Eat hash token. 
2655 2656 const MCExpr *WidthExpr; 2657 if (getParser().ParseExpression(WidthExpr)) { 2658 Error(E, "malformed immediate expression"); 2659 return MatchOperand_ParseFail; 2660 } 2661 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2662 if (!CE) { 2663 Error(E, "'width' operand must be an immediate"); 2664 return MatchOperand_ParseFail; 2665 } 2666 2667 int64_t Width = CE->getValue(); 2668 // The LSB must be in the range [1,32-lsb] 2669 if (Width < 1 || Width > 32 - LSB) { 2670 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2671 return MatchOperand_ParseFail; 2672 } 2673 E = Parser.getTok().getLoc(); 2674 2675 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2676 2677 return MatchOperand_Success; 2678} 2679 2680ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2681parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2682 // Check for a post-index addressing register operand. Specifically: 2683 // postidx_reg := '+' register {, shift} 2684 // | '-' register {, shift} 2685 // | register {, shift} 2686 2687 // This method must return MatchOperand_NoMatch without consuming any tokens 2688 // in the case where there is no match, as other alternatives take other 2689 // parse methods. 2690 AsmToken Tok = Parser.getTok(); 2691 SMLoc S = Tok.getLoc(); 2692 bool haveEaten = false; 2693 bool isAdd = true; 2694 int Reg = -1; 2695 if (Tok.is(AsmToken::Plus)) { 2696 Parser.Lex(); // Eat the '+' token. 2697 haveEaten = true; 2698 } else if (Tok.is(AsmToken::Minus)) { 2699 Parser.Lex(); // Eat the '-' token. 
2700 isAdd = false; 2701 haveEaten = true; 2702 } 2703 if (Parser.getTok().is(AsmToken::Identifier)) 2704 Reg = tryParseRegister(); 2705 if (Reg == -1) { 2706 if (!haveEaten) 2707 return MatchOperand_NoMatch; 2708 Error(Parser.getTok().getLoc(), "register expected"); 2709 return MatchOperand_ParseFail; 2710 } 2711 SMLoc E = Parser.getTok().getLoc(); 2712 2713 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2714 unsigned ShiftImm = 0; 2715 if (Parser.getTok().is(AsmToken::Comma)) { 2716 Parser.Lex(); // Eat the ','. 2717 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2718 return MatchOperand_ParseFail; 2719 } 2720 2721 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2722 ShiftImm, S, E)); 2723 2724 return MatchOperand_Success; 2725} 2726 2727ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2728parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2729 // Check for a post-index addressing register operand. Specifically: 2730 // am3offset := '+' register 2731 // | '-' register 2732 // | register 2733 // | # imm 2734 // | # + imm 2735 // | # - imm 2736 2737 // This method must return MatchOperand_NoMatch without consuming any tokens 2738 // in the case where there is no match, as other alternatives take other 2739 // parse methods. 2740 AsmToken Tok = Parser.getTok(); 2741 SMLoc S = Tok.getLoc(); 2742 2743 // Do immediates first, as we always parse those if we have a '#'. 2744 if (Parser.getTok().is(AsmToken::Hash)) { 2745 Parser.Lex(); // Eat the '#'. 2746 // Explicitly look for a '-', as we need to encode negative zero 2747 // differently. 
2748 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2749 const MCExpr *Offset; 2750 if (getParser().ParseExpression(Offset)) 2751 return MatchOperand_ParseFail; 2752 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2753 if (!CE) { 2754 Error(S, "constant expression expected"); 2755 return MatchOperand_ParseFail; 2756 } 2757 SMLoc E = Tok.getLoc(); 2758 // Negative zero is encoded as the flag value INT32_MIN. 2759 int32_t Val = CE->getValue(); 2760 if (isNegative && Val == 0) 2761 Val = INT32_MIN; 2762 2763 Operands.push_back( 2764 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2765 2766 return MatchOperand_Success; 2767 } 2768 2769 2770 bool haveEaten = false; 2771 bool isAdd = true; 2772 int Reg = -1; 2773 if (Tok.is(AsmToken::Plus)) { 2774 Parser.Lex(); // Eat the '+' token. 2775 haveEaten = true; 2776 } else if (Tok.is(AsmToken::Minus)) { 2777 Parser.Lex(); // Eat the '-' token. 2778 isAdd = false; 2779 haveEaten = true; 2780 } 2781 if (Parser.getTok().is(AsmToken::Identifier)) 2782 Reg = tryParseRegister(); 2783 if (Reg == -1) { 2784 if (!haveEaten) 2785 return MatchOperand_NoMatch; 2786 Error(Parser.getTok().getLoc(), "register expected"); 2787 return MatchOperand_ParseFail; 2788 } 2789 SMLoc E = Parser.getTok().getLoc(); 2790 2791 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 2792 0, S, E)); 2793 2794 return MatchOperand_Success; 2795} 2796 2797/// cvtT2LdrdPre - Convert parsed operands to MCInst. 2798/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2799/// when they refer multiple MIOperands inside a single one. 2800bool ARMAsmParser:: 2801cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 2802 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2803 // Rt, Rt2 2804 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2805 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2806 // Create a writeback register dummy placeholder. 
2807 Inst.addOperand(MCOperand::CreateReg(0)); 2808 // addr 2809 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 2810 // pred 2811 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2812 return true; 2813} 2814 2815/// cvtT2StrdPre - Convert parsed operands to MCInst. 2816/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2817/// when they refer multiple MIOperands inside a single one. 2818bool ARMAsmParser:: 2819cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 2820 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2821 // Create a writeback register dummy placeholder. 2822 Inst.addOperand(MCOperand::CreateReg(0)); 2823 // Rt, Rt2 2824 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2825 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2826 // addr 2827 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 2828 // pred 2829 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2830 return true; 2831} 2832 2833/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 2834/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2835/// when they refer multiple MIOperands inside a single one. 2836bool ARMAsmParser:: 2837cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 2838 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2839 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2840 2841 // Create a writeback register dummy placeholder. 2842 Inst.addOperand(MCOperand::CreateImm(0)); 2843 2844 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 2845 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2846 return true; 2847} 2848 2849/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 2850/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2851/// when they refer multiple MIOperands inside a single one. 
2852bool ARMAsmParser:: 2853cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 2854 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2855 // Create a writeback register dummy placeholder. 2856 Inst.addOperand(MCOperand::CreateImm(0)); 2857 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2858 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 2859 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2860 return true; 2861} 2862 2863/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 2864/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2865/// when they refer multiple MIOperands inside a single one. 2866bool ARMAsmParser:: 2867cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 2868 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2869 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2870 2871 // Create a writeback register dummy placeholder. 2872 Inst.addOperand(MCOperand::CreateImm(0)); 2873 2874 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 2875 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2876 return true; 2877} 2878 2879/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 2880/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2881/// when they refer multiple MIOperands inside a single one. 2882bool ARMAsmParser:: 2883cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 2884 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2885 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2886 2887 // Create a writeback register dummy placeholder. 2888 Inst.addOperand(MCOperand::CreateImm(0)); 2889 2890 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 2891 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2892 return true; 2893} 2894 2895 2896/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
2897/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2898/// when they refer multiple MIOperands inside a single one. 2899bool ARMAsmParser:: 2900cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 2901 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2902 // Create a writeback register dummy placeholder. 2903 Inst.addOperand(MCOperand::CreateImm(0)); 2904 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2905 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 2906 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2907 return true; 2908} 2909 2910/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 2911/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2912/// when they refer multiple MIOperands inside a single one. 2913bool ARMAsmParser:: 2914cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 2915 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2916 // Create a writeback register dummy placeholder. 2917 Inst.addOperand(MCOperand::CreateImm(0)); 2918 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2919 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 2920 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2921 return true; 2922} 2923 2924/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 2925/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2926/// when they refer multiple MIOperands inside a single one. 2927bool ARMAsmParser:: 2928cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 2929 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2930 // Create a writeback register dummy placeholder. 
2931 Inst.addOperand(MCOperand::CreateImm(0)); 2932 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2933 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 2934 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2935 return true; 2936} 2937 2938/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 2939/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2940/// when they refer multiple MIOperands inside a single one. 2941bool ARMAsmParser:: 2942cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 2943 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2944 // Rt 2945 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2946 // Create a writeback register dummy placeholder. 2947 Inst.addOperand(MCOperand::CreateImm(0)); 2948 // addr 2949 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2950 // offset 2951 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 2952 // pred 2953 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2954 return true; 2955} 2956 2957/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 2958/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2959/// when they refer multiple MIOperands inside a single one. 2960bool ARMAsmParser:: 2961cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 2962 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2963 // Rt 2964 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2965 // Create a writeback register dummy placeholder. 2966 Inst.addOperand(MCOperand::CreateImm(0)); 2967 // addr 2968 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2969 // offset 2970 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 2971 // pred 2972 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2973 return true; 2974} 2975 2976/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
2977/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2978/// when they refer multiple MIOperands inside a single one. 2979bool ARMAsmParser:: 2980cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 2981 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2982 // Create a writeback register dummy placeholder. 2983 Inst.addOperand(MCOperand::CreateImm(0)); 2984 // Rt 2985 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2986 // addr 2987 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2988 // offset 2989 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 2990 // pred 2991 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2992 return true; 2993} 2994 2995/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 2996/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2997/// when they refer multiple MIOperands inside a single one. 2998bool ARMAsmParser:: 2999cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3000 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3001 // Create a writeback register dummy placeholder. 3002 Inst.addOperand(MCOperand::CreateImm(0)); 3003 // Rt 3004 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3005 // addr 3006 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3007 // offset 3008 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3009 // pred 3010 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3011 return true; 3012} 3013 3014/// cvtLdrdPre - Convert parsed operands to MCInst. 3015/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3016/// when they refer multiple MIOperands inside a single one. 
3017bool ARMAsmParser:: 3018cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3019 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3020 // Rt, Rt2 3021 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3022 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3023 // Create a writeback register dummy placeholder. 3024 Inst.addOperand(MCOperand::CreateImm(0)); 3025 // addr 3026 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3027 // pred 3028 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3029 return true; 3030} 3031 3032/// cvtStrdPre - Convert parsed operands to MCInst. 3033/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3034/// when they refer multiple MIOperands inside a single one. 3035bool ARMAsmParser:: 3036cvtStrdPre(MCInst &Inst, unsigned Opcode, 3037 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3038 // Create a writeback register dummy placeholder. 3039 Inst.addOperand(MCOperand::CreateImm(0)); 3040 // Rt, Rt2 3041 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3042 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3043 // addr 3044 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3045 // pred 3046 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3047 return true; 3048} 3049 3050/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3051/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3052/// when they refer multiple MIOperands inside a single one. 3053bool ARMAsmParser:: 3054cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3055 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3056 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3057 // Create a writeback register dummy placeholder. 
3058 Inst.addOperand(MCOperand::CreateImm(0)); 3059 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3060 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3061 return true; 3062} 3063 3064/// cvtThumbMultiple- Convert parsed operands to MCInst. 3065/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3066/// when they refer multiple MIOperands inside a single one. 3067bool ARMAsmParser:: 3068cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3069 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3070 // The second source operand must be the same register as the destination 3071 // operand. 3072 if (Operands.size() == 6 && 3073 (((ARMOperand*)Operands[3])->getReg() != 3074 ((ARMOperand*)Operands[5])->getReg()) && 3075 (((ARMOperand*)Operands[3])->getReg() != 3076 ((ARMOperand*)Operands[4])->getReg())) { 3077 Error(Operands[3]->getStartLoc(), 3078 "destination register must match source register"); 3079 return false; 3080 } 3081 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3082 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3083 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3084 // If we have a three-operand form, use that, else the second source operand 3085 // is just the destination operand again. 3086 if (Operands.size() == 6) 3087 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3088 else 3089 Inst.addOperand(Inst.getOperand(0)); 3090 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3091 3092 return true; 3093} 3094 3095/// Parse an ARM memory expression, return false if successful else return true 3096/// or an error. The first token must be a '[' when called. 3097bool ARMAsmParser:: 3098parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3099 SMLoc S, E; 3100 assert(Parser.getTok().is(AsmToken::LBrac) && 3101 "Token is not a Left Bracket"); 3102 S = Parser.getTok().getLoc(); 3103 Parser.Lex(); // Eat left bracket token. 

  // Every memory operand starts with a base register.
  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // "[Rn]" — base register only, no offset.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier (NEON "[Rn, :64]" syntax).
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Alignment is written in bits but stored in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
    case 64:  Align = 8;  break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    // Peek at the leading '-' so "#-0" can be distinguished from "#0" below.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.
3256 3257 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3258 ShiftType, ShiftImm, 0, isNegative, 3259 S, E)); 3260 3261 // If there's a pre-indexing writeback marker, '!', just add it as a token 3262 // operand. 3263 if (Parser.getTok().is(AsmToken::Exclaim)) { 3264 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3265 Parser.Lex(); // Eat the '!'. 3266 } 3267 3268 return false; 3269} 3270 3271/// parseMemRegOffsetShift - one of these two: 3272/// ( lsl | lsr | asr | ror ) , # shift_amount 3273/// rrx 3274/// return true if it parses a shift otherwise it returns false. 3275bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3276 unsigned &Amount) { 3277 SMLoc Loc = Parser.getTok().getLoc(); 3278 const AsmToken &Tok = Parser.getTok(); 3279 if (Tok.isNot(AsmToken::Identifier)) 3280 return true; 3281 StringRef ShiftName = Tok.getString(); 3282 if (ShiftName == "lsl" || ShiftName == "LSL") 3283 St = ARM_AM::lsl; 3284 else if (ShiftName == "lsr" || ShiftName == "LSR") 3285 St = ARM_AM::lsr; 3286 else if (ShiftName == "asr" || ShiftName == "ASR") 3287 St = ARM_AM::asr; 3288 else if (ShiftName == "ror" || ShiftName == "ROR") 3289 St = ARM_AM::ror; 3290 else if (ShiftName == "rrx" || ShiftName == "RRX") 3291 St = ARM_AM::rrx; 3292 else 3293 return Error(Loc, "illegal shift operator"); 3294 Parser.Lex(); // Eat shift type token. 3295 3296 // rrx stands alone. 3297 Amount = 0; 3298 if (St != ARM_AM::rrx) { 3299 Loc = Parser.getTok().getLoc(); 3300 // A '#' and a shift amount. 3301 const AsmToken &HashTok = Parser.getTok(); 3302 if (HashTok.isNot(AsmToken::Hash)) 3303 return Error(HashTok.getLoc(), "'#' expected"); 3304 Parser.Lex(); // Eat hash token. 3305 3306 const MCExpr *Expr; 3307 if (getParser().ParseExpression(Expr)) 3308 return true; 3309 // Range check the immediate. 
3310 // lsl, ror: 0 <= imm <= 31 3311 // lsr, asr: 0 <= imm <= 32 3312 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3313 if (!CE) 3314 return Error(Loc, "shift amount must be an immediate"); 3315 int64_t Imm = CE->getValue(); 3316 if (Imm < 0 || 3317 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3318 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3319 return Error(Loc, "immediate shift value out of range"); 3320 Amount = Imm; 3321 } 3322 3323 return false; 3324} 3325 3326/// parseFPImm - A floating point immediate expression operand. 3327ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3328parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3329 SMLoc S = Parser.getTok().getLoc(); 3330 3331 if (Parser.getTok().isNot(AsmToken::Hash)) 3332 return MatchOperand_NoMatch; 3333 Parser.Lex(); // Eat the '#'. 3334 3335 // Handle negation, as that still comes through as a separate token. 3336 bool isNegative = false; 3337 if (Parser.getTok().is(AsmToken::Minus)) { 3338 isNegative = true; 3339 Parser.Lex(); 3340 } 3341 const AsmToken &Tok = Parser.getTok(); 3342 if (Tok.is(AsmToken::Real)) { 3343 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3344 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3345 // If we had a '-' in front, toggle the sign bit. 3346 IntVal ^= (uint64_t)isNegative << 63; 3347 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3348 Parser.Lex(); // Eat the token. 3349 if (Val == -1) { 3350 TokError("floating point value out of range"); 3351 return MatchOperand_ParseFail; 3352 } 3353 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3354 return MatchOperand_Success; 3355 } 3356 if (Tok.is(AsmToken::Integer)) { 3357 int64_t Val = Tok.getIntVal(); 3358 Parser.Lex(); // Eat the token. 
3359 if (Val > 255 || Val < 0) { 3360 TokError("encoded floating point value out of range"); 3361 return MatchOperand_ParseFail; 3362 } 3363 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3364 return MatchOperand_Success; 3365 } 3366 3367 TokError("invalid floating point immediate"); 3368 return MatchOperand_ParseFail; 3369} 3370/// Parse a arm instruction operand. For now this parses the operand regardless 3371/// of the mnemonic. 3372bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3373 StringRef Mnemonic) { 3374 SMLoc S, E; 3375 3376 // Check if the current operand has a custom associated parser, if so, try to 3377 // custom parse the operand, or fallback to the general approach. 3378 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3379 if (ResTy == MatchOperand_Success) 3380 return false; 3381 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3382 // there was a match, but an error occurred, in which case, just return that 3383 // the operand parsing failed. 3384 if (ResTy == MatchOperand_ParseFail) 3385 return true; 3386 3387 switch (getLexer().getKind()) { 3388 default: 3389 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3390 return true; 3391 case AsmToken::Identifier: { 3392 // If this is VMRS, check for the apsr_nzcv operand. 3393 if (!tryParseRegisterWithWriteBack(Operands)) 3394 return false; 3395 int Res = tryParseShiftRegister(Operands); 3396 if (Res == 0) // success 3397 return false; 3398 else if (Res == -1) // irrecoverable error 3399 return true; 3400 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3401 S = Parser.getTok().getLoc(); 3402 Parser.Lex(); 3403 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3404 return false; 3405 } 3406 3407 // Fall though for the Identifier case that is not a register or a 3408 // special name. 
3409 } 3410 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3411 case AsmToken::Dot: { // . as a branch target 3412 // This was not a register so parse other operands that start with an 3413 // identifier (like labels) as expressions and create them as immediates. 3414 const MCExpr *IdVal; 3415 S = Parser.getTok().getLoc(); 3416 if (getParser().ParseExpression(IdVal)) 3417 return true; 3418 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3419 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3420 return false; 3421 } 3422 case AsmToken::LBrac: 3423 return parseMemory(Operands); 3424 case AsmToken::LCurly: 3425 return parseRegisterList(Operands); 3426 case AsmToken::Hash: { 3427 // #42 -> immediate. 3428 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3429 S = Parser.getTok().getLoc(); 3430 Parser.Lex(); 3431 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3432 const MCExpr *ImmVal; 3433 if (getParser().ParseExpression(ImmVal)) 3434 return true; 3435 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3436 if (!CE) { 3437 Error(S, "constant expression expected"); 3438 return MatchOperand_ParseFail; 3439 } 3440 int32_t Val = CE->getValue(); 3441 if (isNegative && Val == 0) 3442 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3443 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3444 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3445 return false; 3446 } 3447 case AsmToken::Colon: { 3448 // ":lower16:" and ":upper16:" expression prefixes 3449 // FIXME: Check it's an expression prefix, 3450 // e.g. (FOO - :lower16:BAR) isn't legal. 
3451 ARMMCExpr::VariantKind RefKind; 3452 if (parsePrefix(RefKind)) 3453 return true; 3454 3455 const MCExpr *SubExprVal; 3456 if (getParser().ParseExpression(SubExprVal)) 3457 return true; 3458 3459 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3460 getContext()); 3461 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3462 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3463 return false; 3464 } 3465 } 3466} 3467 3468// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3469// :lower16: and :upper16:. 3470bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 3471 RefKind = ARMMCExpr::VK_ARM_None; 3472 3473 // :lower16: and :upper16: modifiers 3474 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 3475 Parser.Lex(); // Eat ':' 3476 3477 if (getLexer().isNot(AsmToken::Identifier)) { 3478 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 3479 return true; 3480 } 3481 3482 StringRef IDVal = Parser.getTok().getIdentifier(); 3483 if (IDVal == "lower16") { 3484 RefKind = ARMMCExpr::VK_ARM_LO16; 3485 } else if (IDVal == "upper16") { 3486 RefKind = ARMMCExpr::VK_ARM_HI16; 3487 } else { 3488 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 3489 return true; 3490 } 3491 Parser.Lex(); 3492 3493 if (getLexer().isNot(AsmToken::Colon)) { 3494 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 3495 return true; 3496 } 3497 Parser.Lex(); // Eat the last ':' 3498 return false; 3499} 3500 3501/// \brief Given a mnemonic, split out possible predication code and carry 3502/// setting letters to form a canonical mnemonic and flags. 3503// 3504// FIXME: Would be nice to autogen this. 3505// FIXME: This is a bit of a maze of special cases. 
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  // Defaults: unconditional, no 's' suffix, no interrupt-mode suffix.
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // Note: StringRef::substr clamps an out-of-range start, so a mnemonic
  // shorter than two characters simply matches no condition code here.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      // Strip the recognized condition-code suffix off the mnemonic.
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
3576 unsigned IMod = 3577 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 3578 .Case("ie", ARM_PROC::IE) 3579 .Case("id", ARM_PROC::ID) 3580 .Default(~0U); 3581 if (IMod != ~0U) { 3582 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 3583 ProcessorIMod = IMod; 3584 } 3585 } 3586 3587 // The "it" instruction has the condition mask on the end of the mnemonic. 3588 if (Mnemonic.startswith("it")) { 3589 ITMask = Mnemonic.slice(2, Mnemonic.size()); 3590 Mnemonic = Mnemonic.slice(0, 2); 3591 } 3592 3593 return Mnemonic; 3594} 3595 3596/// \brief Given a canonical mnemonic, determine if the instruction ever allows 3597/// inclusion of carry set or predication code operands. 3598// 3599// FIXME: It would be nice to autogen this. 3600void ARMAsmParser:: 3601getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 3602 bool &CanAcceptPredicationCode) { 3603 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 3604 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 3605 Mnemonic == "add" || Mnemonic == "adc" || 3606 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 3607 Mnemonic == "orr" || Mnemonic == "mvn" || 3608 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 3609 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 3610 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 3611 Mnemonic == "mla" || Mnemonic == "smlal" || 3612 Mnemonic == "umlal" || Mnemonic == "umull"))) { 3613 CanAcceptCarrySet = true; 3614 } else 3615 CanAcceptCarrySet = false; 3616 3617 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 3618 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 3619 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 3620 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 3621 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 3622 (Mnemonic == "clrex" && !isThumb()) || 3623 (Mnemonic == "nop" && 
isThumbOne()) || 3624 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 3625 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 3626 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 3627 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 3628 !isThumb()) || 3629 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 3630 CanAcceptPredicationCode = false; 3631 } else 3632 CanAcceptPredicationCode = true; 3633 3634 if (isThumb()) { 3635 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 3636 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 3637 CanAcceptPredicationCode = false; 3638 } 3639} 3640 3641bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 3642 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3643 // FIXME: This is all horribly hacky. We really need a better way to deal 3644 // with optional operands like this in the matcher table. 3645 3646 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 3647 // another does not. Specifically, the MOVW instruction does not. So we 3648 // special case it here and remove the defaulted (non-setting) cc_out 3649 // operand if that's the instruction we're trying to match. 3650 // 3651 // We do this as post-processing of the explicit operands rather than just 3652 // conditionally adding the cc_out in the first place because we need 3653 // to check the type of the parsed immediate operand. 3654 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 3655 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 3656 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 3657 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3658 return true; 3659 3660 // Register-register 'add' for thumb does not have a cc_out operand 3661 // when there are only two register operands. 
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // No special case matched: keep the cc_out operand as parsed.
  return false;
}

/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    unsigned Mask = 8;
    // Walk the suffix right-to-left, shifting the sentinel bit down and
    // setting the high bit for each 't'.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // For now, we're only parsing Thumb1 (for the most part), so
    // just ignore ".n" qualifiers. We'll use them to restrict
    // matching when we do Thumb2.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
3943 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 && 3944 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3945 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 3946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 3947 if (CE && CE->getValue() == 0) { 3948 Operands.erase(Operands.begin() + 5); 3949 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 3950 delete Op; 3951 } 3952 } 3953 3954 return false; 3955} 3956 3957// Validate context-sensitive operand constraints. 3958 3959// return 'true' if register list contains non-low GPR registers, 3960// 'false' otherwise. If Reg is in the register list or is HiReg, set 3961// 'containsReg' to true. 3962static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 3963 unsigned HiReg, bool &containsReg) { 3964 containsReg = false; 3965 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 3966 unsigned OpReg = Inst.getOperand(i).getReg(); 3967 if (OpReg == Reg) 3968 containsReg = true; 3969 // Anything other than a low register isn't legal here. 3970 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 3971 return true; 3972 } 3973 return false; 3974} 3975 3976// Check if the specified regisgter is in the register list of the inst, 3977// starting at the indicated operand number. 3978static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 3979 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 3980 unsigned OpReg = Inst.getOperand(i).getReg(); 3981 if (OpReg == Reg) 3982 return true; 3983 } 3984 return false; 3985} 3986 3987// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 3988// the ARMInsts array) instead. Getting that here requires awkward 3989// API changes, though. Better way? 
namespace llvm {
extern MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for an opcode directly from the ARMInsts table.
static MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// FIXME: We would really like to be able to tablegen'erate this.
// Check context-sensitive constraints (IT block state, register pairing,
// register list contents, ...) that the table-driven matcher can't express.
// Returns true (after emitting a diagnostic) if the instruction is invalid.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    // 'bit' selects between the IT condition and its inverse ('e' slots).
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

// Post-process a successfully matched instruction, e.g. to switch to a
// preferred or smaller encoding than the one the matcher selected.
void ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
4166 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4167 Inst.getNumOperands() == 5) { 4168 MCInst TmpInst; 4169 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4170 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4171 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4172 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4173 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4174 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4175 TmpInst.addOperand(Inst.getOperand(3)); 4176 Inst = TmpInst; 4177 } 4178 break; 4179 case ARM::tADDi8: 4180 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4181 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4182 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4183 // to encoding T1 if <Rd> is omitted." 4184 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4185 Inst.setOpcode(ARM::tADDi3); 4186 break; 4187 case ARM::tSUBi8: 4188 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4189 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4190 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4191 // to encoding T1 if <Rd> is omitted." 4192 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4193 Inst.setOpcode(ARM::tSUBi3); 4194 break; 4195 case ARM::tB: 4196 // A Thumb conditional branch outside of an IT block is a tBcc. 4197 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4198 Inst.setOpcode(ARM::tBcc); 4199 break; 4200 case ARM::t2B: 4201 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4202 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4203 Inst.setOpcode(ARM::t2Bcc); 4204 break; 4205 case ARM::t2Bcc: 4206 // If the conditional is AL or we're in an IT block, we really want t2B. 
4207 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) 4208 Inst.setOpcode(ARM::t2B); 4209 break; 4210 case ARM::tBcc: 4211 // If the conditional is AL, we really want tB. 4212 if (Inst.getOperand(1).getImm() == ARMCC::AL) 4213 Inst.setOpcode(ARM::tB); 4214 break; 4215 case ARM::tLDMIA: { 4216 // If the register list contains any high registers, or if the writeback 4217 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4218 // instead if we're in Thumb2. Otherwise, this should have generated 4219 // an error in validateInstruction(). 4220 unsigned Rn = Inst.getOperand(0).getReg(); 4221 bool hasWritebackToken = 4222 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4223 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4224 bool listContainsBase; 4225 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4226 (!listContainsBase && !hasWritebackToken) || 4227 (listContainsBase && hasWritebackToken)) { 4228 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4229 assert (isThumbTwo()); 4230 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4231 // If we're switching to the updating version, we need to insert 4232 // the writeback tied operand. 4233 if (hasWritebackToken) 4234 Inst.insert(Inst.begin(), 4235 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4236 } 4237 break; 4238 } 4239 case ARM::tSTMIA_UPD: { 4240 // If the register list contains any high registers, we need to use 4241 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4242 // should have generated an error in validateInstruction(). 4243 unsigned Rn = Inst.getOperand(0).getReg(); 4244 bool listContainsBase; 4245 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4246 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 
4247 assert (isThumbTwo()); 4248 Inst.setOpcode(ARM::t2STMIA_UPD); 4249 } 4250 break; 4251 } 4252 case ARM::t2MOVi: { 4253 // If we can use the 16-bit encoding and the user didn't explicitly 4254 // request the 32-bit variant, transform it here. 4255 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4256 Inst.getOperand(1).getImm() <= 255 && 4257 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4258 Inst.getOperand(4).getReg() == ARM::CPSR) || 4259 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4260 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4261 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4262 // The operands aren't in the same order for tMOVi8... 4263 MCInst TmpInst; 4264 TmpInst.setOpcode(ARM::tMOVi8); 4265 TmpInst.addOperand(Inst.getOperand(0)); 4266 TmpInst.addOperand(Inst.getOperand(4)); 4267 TmpInst.addOperand(Inst.getOperand(1)); 4268 TmpInst.addOperand(Inst.getOperand(2)); 4269 TmpInst.addOperand(Inst.getOperand(3)); 4270 Inst = TmpInst; 4271 } 4272 break; 4273 } 4274 case ARM::t2MOVr: { 4275 // If we can use the 16-bit encoding and the user didn't explicitly 4276 // request the 32-bit variant, transform it here. 4277 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4278 isARMLowRegister(Inst.getOperand(1).getReg()) && 4279 Inst.getOperand(2).getImm() == ARMCC::AL && 4280 Inst.getOperand(4).getReg() == ARM::CPSR && 4281 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4282 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4283 // The operands aren't the same for tMOV[S]r... (no cc_out) 4284 MCInst TmpInst; 4285 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? 
ARM::tMOVSr : ARM::tMOVr); 4286 TmpInst.addOperand(Inst.getOperand(0)); 4287 TmpInst.addOperand(Inst.getOperand(1)); 4288 TmpInst.addOperand(Inst.getOperand(2)); 4289 TmpInst.addOperand(Inst.getOperand(3)); 4290 Inst = TmpInst; 4291 } 4292 break; 4293 } 4294 case ARM::t2SXTH: 4295 case ARM::t2SXTB: 4296 case ARM::t2UXTH: 4297 case ARM::t2UXTB: { 4298 // If we can use the 16-bit encoding and the user didn't explicitly 4299 // request the 32-bit variant, transform it here. 4300 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4301 isARMLowRegister(Inst.getOperand(1).getReg()) && 4302 Inst.getOperand(2).getImm() == 0 && 4303 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4304 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4305 unsigned NewOpc; 4306 switch (Inst.getOpcode()) { 4307 default: llvm_unreachable("Illegal opcode!"); 4308 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4309 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4310 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4311 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4312 } 4313 // The operands aren't the same for thumb1 (no rotate operand). 4314 MCInst TmpInst; 4315 TmpInst.setOpcode(NewOpc); 4316 TmpInst.addOperand(Inst.getOperand(0)); 4317 TmpInst.addOperand(Inst.getOperand(1)); 4318 TmpInst.addOperand(Inst.getOperand(3)); 4319 TmpInst.addOperand(Inst.getOperand(4)); 4320 Inst = TmpInst; 4321 } 4322 break; 4323 } 4324 case ARM::t2IT: { 4325 // The mask bits for all but the first condition are represented as 4326 // the low bit of the condition code value implies 't'. We currently 4327 // always have 1 implies 't', so XOR toggle the bits if the low bit 4328 // of the condition code is zero. 
The encoding also expects the low 4329 // bit of the condition to be encoded as bit 4 of the mask operand, 4330 // so mask that in if needed 4331 MCOperand &MO = Inst.getOperand(1); 4332 unsigned Mask = MO.getImm(); 4333 unsigned OrigMask = Mask; 4334 unsigned TZ = CountTrailingZeros_32(Mask); 4335 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4336 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4337 for (unsigned i = 3; i != TZ; --i) 4338 Mask ^= 1 << i; 4339 } else 4340 Mask |= 0x10; 4341 MO.setImm(Mask); 4342 4343 // Set up the IT block state according to the IT instruction we just 4344 // matched. 4345 assert(!inITBlock() && "nested IT blocks?!"); 4346 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4347 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4348 ITState.CurPosition = 0; 4349 ITState.FirstCond = true; 4350 break; 4351 } 4352 } 4353} 4354 4355unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4356 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4357 // suffix depending on whether they're in an IT block or not. 4358 unsigned Opc = Inst.getOpcode(); 4359 MCInstrDesc &MCID = getInstDesc(Opc); 4360 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4361 assert(MCID.hasOptionalDef() && 4362 "optionally flag setting instruction missing optional def operand"); 4363 assert(MCID.NumOperands == Inst.getNumOperands() && 4364 "operand count mismatch!"); 4365 // Find the optional-def operand (cc_out). 4366 unsigned OpNo; 4367 for (OpNo = 0; 4368 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4369 ++OpNo) 4370 ; 4371 // If we're parsing Thumb1, reject it completely. 4372 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4373 return Match_MnemonicFail; 4374 // If we're parsing Thumb2, which form is legal depends on whether we're 4375 // in an IT block. 
4376 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4377 !inITBlock()) 4378 return Match_RequiresITBlock; 4379 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4380 inITBlock()) 4381 return Match_RequiresNotITBlock; 4382 } 4383 // Some high-register supporting Thumb1 encodings only allow both registers 4384 // to be from r0-r7 when in Thumb2. 4385 else if (Opc == ARM::tADDhirr && isThumbOne() && 4386 isARMLowRegister(Inst.getOperand(1).getReg()) && 4387 isARMLowRegister(Inst.getOperand(2).getReg())) 4388 return Match_RequiresThumb2; 4389 // Others only require ARMv6 or later. 4390 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4391 isARMLowRegister(Inst.getOperand(0).getReg()) && 4392 isARMLowRegister(Inst.getOperand(1).getReg())) 4393 return Match_RequiresV6; 4394 return Match_Success; 4395} 4396 4397bool ARMAsmParser:: 4398MatchAndEmitInstruction(SMLoc IDLoc, 4399 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4400 MCStreamer &Out) { 4401 MCInst Inst; 4402 unsigned ErrorInfo; 4403 unsigned MatchResult; 4404 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4405 switch (MatchResult) { 4406 default: break; 4407 case Match_Success: 4408 // Context sensitive operand constraints aren't handled by the matcher, 4409 // so check them here. 4410 if (validateInstruction(Inst, Operands)) { 4411 // Still progress the IT block, otherwise one wrong condition causes 4412 // nasty cascading errors. 4413 forwardITPosition(); 4414 return true; 4415 } 4416 4417 // Some instructions need post-processing to, for example, tweak which 4418 // encoding is selected. 4419 processInstruction(Inst, Operands); 4420 4421 // Only move forward at the very end so that everything in validate 4422 // and process gets a consistent answer about whether we're in an IT 4423 // block. 
4424 forwardITPosition(); 4425 4426 Out.EmitInstruction(Inst); 4427 return false; 4428 case Match_MissingFeature: 4429 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 4430 return true; 4431 case Match_InvalidOperand: { 4432 SMLoc ErrorLoc = IDLoc; 4433 if (ErrorInfo != ~0U) { 4434 if (ErrorInfo >= Operands.size()) 4435 return Error(IDLoc, "too few operands for instruction"); 4436 4437 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 4438 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 4439 } 4440 4441 return Error(ErrorLoc, "invalid operand for instruction"); 4442 } 4443 case Match_MnemonicFail: 4444 return Error(IDLoc, "invalid instruction"); 4445 case Match_ConversionFail: 4446 // The converter function will have already emited a diagnostic. 4447 return true; 4448 case Match_RequiresNotITBlock: 4449 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 4450 case Match_RequiresITBlock: 4451 return Error(IDLoc, "instruction only valid inside IT block"); 4452 case Match_RequiresV6: 4453 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 4454 case Match_RequiresThumb2: 4455 return Error(IDLoc, "instruction variant requires Thumb2"); 4456 } 4457 4458 llvm_unreachable("Implement any new match types added!"); 4459 return true; 4460} 4461 4462/// parseDirective parses the arm specific directives 4463bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 4464 StringRef IDVal = DirectiveID.getIdentifier(); 4465 if (IDVal == ".word") 4466 return parseDirectiveWord(4, DirectiveID.getLoc()); 4467 else if (IDVal == ".thumb") 4468 return parseDirectiveThumb(DirectiveID.getLoc()); 4469 else if (IDVal == ".thumb_func") 4470 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 4471 else if (IDVal == ".code") 4472 return parseDirectiveCode(DirectiveID.getLoc()); 4473 else if (IDVal == ".syntax") 4474 return parseDirectiveSyntax(DirectiveID.getLoc()); 4475 return true; 4476} 4477 4478/// parseDirectiveWord 
4479/// ::= .word [ expression (, expression)* ] 4480bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 4481 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4482 for (;;) { 4483 const MCExpr *Value; 4484 if (getParser().ParseExpression(Value)) 4485 return true; 4486 4487 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 4488 4489 if (getLexer().is(AsmToken::EndOfStatement)) 4490 break; 4491 4492 // FIXME: Improve diagnostic. 4493 if (getLexer().isNot(AsmToken::Comma)) 4494 return Error(L, "unexpected token in directive"); 4495 Parser.Lex(); 4496 } 4497 } 4498 4499 Parser.Lex(); 4500 return false; 4501} 4502 4503/// parseDirectiveThumb 4504/// ::= .thumb 4505bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 4506 if (getLexer().isNot(AsmToken::EndOfStatement)) 4507 return Error(L, "unexpected token in directive"); 4508 Parser.Lex(); 4509 4510 // TODO: set thumb mode 4511 // TODO: tell the MC streamer the mode 4512 // getParser().getStreamer().Emit???(); 4513 return false; 4514} 4515 4516/// parseDirectiveThumbFunc 4517/// ::= .thumbfunc symbol_name 4518bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 4519 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 4520 bool isMachO = MAI.hasSubsectionsViaSymbols(); 4521 StringRef Name; 4522 4523 // Darwin asm has function name after .thumb_func direction 4524 // ELF doesn't 4525 if (isMachO) { 4526 const AsmToken &Tok = Parser.getTok(); 4527 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 4528 return Error(L, "unexpected token in .thumb_func directive"); 4529 Name = Tok.getString(); 4530 Parser.Lex(); // Consume the identifier token. 
4531 } 4532 4533 if (getLexer().isNot(AsmToken::EndOfStatement)) 4534 return Error(L, "unexpected token in directive"); 4535 Parser.Lex(); 4536 4537 // FIXME: assuming function name will be the line following .thumb_func 4538 if (!isMachO) { 4539 Name = Parser.getTok().getString(); 4540 } 4541 4542 // Mark symbol as a thumb symbol. 4543 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4544 getParser().getStreamer().EmitThumbFunc(Func); 4545 return false; 4546} 4547 4548/// parseDirectiveSyntax 4549/// ::= .syntax unified | divided 4550bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4551 const AsmToken &Tok = Parser.getTok(); 4552 if (Tok.isNot(AsmToken::Identifier)) 4553 return Error(L, "unexpected token in .syntax directive"); 4554 StringRef Mode = Tok.getString(); 4555 if (Mode == "unified" || Mode == "UNIFIED") 4556 Parser.Lex(); 4557 else if (Mode == "divided" || Mode == "DIVIDED") 4558 return Error(L, "'.syntax divided' arm asssembly not supported"); 4559 else 4560 return Error(L, "unrecognized syntax mode in .syntax directive"); 4561 4562 if (getLexer().isNot(AsmToken::EndOfStatement)) 4563 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4564 Parser.Lex(); 4565 4566 // TODO tell the MC streamer the mode 4567 // getParser().getStreamer().Emit???(); 4568 return false; 4569} 4570 4571/// parseDirectiveCode 4572/// ::= .code 16 | 32 4573bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4574 const AsmToken &Tok = Parser.getTok(); 4575 if (Tok.isNot(AsmToken::Integer)) 4576 return Error(L, "unexpected token in .code directive"); 4577 int64_t Val = Parser.getTok().getIntVal(); 4578 if (Val == 16) 4579 Parser.Lex(); 4580 else if (Val == 32) 4581 Parser.Lex(); 4582 else 4583 return Error(L, "invalid operand to .code directive"); 4584 4585 if (getLexer().isNot(AsmToken::EndOfStatement)) 4586 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4587 Parser.Lex(); 4588 4589 if (Val == 16) { 4590 if 
(!isThumb()) 4591 SwitchMode(); 4592 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4593 } else { 4594 if (isThumb()) 4595 SwitchMode(); 4596 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4597 } 4598 4599 return false; 4600} 4601 4602extern "C" void LLVMInitializeARMAsmLexer(); 4603 4604/// Force static initialization. 4605extern "C" void LLVMInitializeARMAsmParser() { 4606 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4607 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4608 LLVMInitializeARMAsmLexer(); 4609} 4610 4611#define GET_REGISTER_MATCHER 4612#define GET_MATCHER_IMPLEMENTATION 4613#include "ARMGenAsmMatcher.inc" 4614