// ARMAsmParser.cpp revision 460a90540b045c102012da2492999557e6840526
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - Target assembly parser for ARM and Thumb. Implements the
/// MCTargetAsmParser interface (ParseRegister/ParseInstruction/ParseDirective/
/// MatchAndEmitInstruction) and tracks Thumb IT-block state across
/// instructions so that conditional instructions can be validated against the
/// enclosing IT block.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;   // Subtarget features; also toggled by SwitchMode().
  MCAsmParser &Parser;    // The generic parser we delegate lexing/errors to.

  /// State of the currently-active Thumb IT (if-then) block, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block.  It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  /// inITBlock - True while an IT block is active (CurPosition is a sentinel
  /// ~0U when there is none; see the constructor and forwardITPosition).
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  /// forwardITPosition - Advance to the next slot of the active IT block,
  /// clearing the state once the block's last instruction has been consumed.
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    // NOTE(review): the block holds 4 - TZ conditional instructions, so the
    // '5 - TZ' limit presumably counts the IT instruction itself as a
    // position — confirm against where CurPosition is initialized.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Convenience wrappers forwarding diagnostics to the generic parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Register / operand / memory-operand parsing helpers. The 'try' variants
  // return a sentinel (negative int / bool failure) rather than diagnosing,
  // so callers can backtrack.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Target-specific assembler directives (.word, .thumb, .thumb_func,
  // .code, .syntax).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  /// splitMnemonic - Split an ARM mnemonic into its base opcode plus any
  /// predication code, carry-setting suffix, processor-iflags modifier and
  /// IT-mask suffix, returned through the out-parameters.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  /// getMnemonicAcceptInfo - Report whether a mnemonic can accept a
  /// carry-set ('s') suffix and/or a predication code.
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature predicates, derived from STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  /// SwitchMode - Toggle between ARM and Thumb mode and recompute the
  /// available-features mask accordingly (used by .code / .thumb).
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, hooked up by the tablegen'd matcher above.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  /// parsePKHImm - Shared worker for PKH shift operands: expects shift
  /// operation 'Op' with an immediate in [Low, High].
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods — translate matched operand lists into the
  // operand order a particular MCInst encoding expects.
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match checks and fixups on the built MCInst.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  /// shouldOmitCCOutOperand - Decide whether the optional cc_out operand
  /// should be dropped for this mnemonic/operand combination.
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  /// ARM-specific match-failure codes returned by
  /// checkTargetMatchPredicate, beyond the generic matcher results.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
243class ARMOperand : public MCParsedAsmOperand { 244 enum KindTy { 245 k_CondCode, 246 k_CCOut, 247 k_ITCondMask, 248 k_CoprocNum, 249 k_CoprocReg, 250 k_Immediate, 251 k_FPImmediate, 252 k_MemBarrierOpt, 253 k_Memory, 254 k_PostIndexRegister, 255 k_MSRMask, 256 k_ProcIFlags, 257 k_VectorIndex, 258 k_Register, 259 k_RegisterList, 260 k_DPRRegisterList, 261 k_SPRRegisterList, 262 k_ShiftedRegister, 263 k_ShiftedImmediate, 264 k_ShifterImmediate, 265 k_RotateImmediate, 266 k_BitfieldDescriptor, 267 k_Token 268 } Kind; 269 270 SMLoc StartLoc, EndLoc; 271 SmallVector<unsigned, 8> Registers; 272 273 union { 274 struct { 275 ARMCC::CondCodes Val; 276 } CC; 277 278 struct { 279 unsigned Val; 280 } Cop; 281 282 struct { 283 unsigned Mask:4; 284 } ITMask; 285 286 struct { 287 ARM_MB::MemBOpt Val; 288 } MBOpt; 289 290 struct { 291 ARM_PROC::IFlags Val; 292 } IFlags; 293 294 struct { 295 unsigned Val; 296 } MMask; 297 298 struct { 299 const char *Data; 300 unsigned Length; 301 } Tok; 302 303 struct { 304 unsigned RegNum; 305 } Reg; 306 307 struct { 308 unsigned Val; 309 } VectorIndex; 310 311 struct { 312 const MCExpr *Val; 313 } Imm; 314 315 struct { 316 unsigned Val; // encoded 8-bit representation 317 } FPImm; 318 319 /// Combined record for all forms of ARM address expressions. 320 struct { 321 unsigned BaseRegNum; 322 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 323 // was specified. 324 const MCConstantExpr *OffsetImm; // Offset immediate value 325 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 326 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 327 unsigned ShiftImm; // shift for OffsetReg. 328 unsigned isNegative : 1; // Negated OffsetReg? 
(~'U' bit) 329 } Mem; 330 331 struct { 332 unsigned RegNum; 333 bool isAdd; 334 ARM_AM::ShiftOpc ShiftTy; 335 unsigned ShiftImm; 336 } PostIdxReg; 337 338 struct { 339 bool isASR; 340 unsigned Imm; 341 } ShifterImm; 342 struct { 343 ARM_AM::ShiftOpc ShiftTy; 344 unsigned SrcReg; 345 unsigned ShiftReg; 346 unsigned ShiftImm; 347 } RegShiftedReg; 348 struct { 349 ARM_AM::ShiftOpc ShiftTy; 350 unsigned SrcReg; 351 unsigned ShiftImm; 352 } RegShiftedImm; 353 struct { 354 unsigned Imm; 355 } RotImm; 356 struct { 357 unsigned LSB; 358 unsigned Width; 359 } Bitfield; 360 }; 361 362 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 363public: 364 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 365 Kind = o.Kind; 366 StartLoc = o.StartLoc; 367 EndLoc = o.EndLoc; 368 switch (Kind) { 369 case k_CondCode: 370 CC = o.CC; 371 break; 372 case k_ITCondMask: 373 ITMask = o.ITMask; 374 break; 375 case k_Token: 376 Tok = o.Tok; 377 break; 378 case k_CCOut: 379 case k_Register: 380 Reg = o.Reg; 381 break; 382 case k_RegisterList: 383 case k_DPRRegisterList: 384 case k_SPRRegisterList: 385 Registers = o.Registers; 386 break; 387 case k_CoprocNum: 388 case k_CoprocReg: 389 Cop = o.Cop; 390 break; 391 case k_Immediate: 392 Imm = o.Imm; 393 break; 394 case k_FPImmediate: 395 FPImm = o.FPImm; 396 break; 397 case k_MemBarrierOpt: 398 MBOpt = o.MBOpt; 399 break; 400 case k_Memory: 401 Mem = o.Mem; 402 break; 403 case k_PostIndexRegister: 404 PostIdxReg = o.PostIdxReg; 405 break; 406 case k_MSRMask: 407 MMask = o.MMask; 408 break; 409 case k_ProcIFlags: 410 IFlags = o.IFlags; 411 break; 412 case k_ShifterImmediate: 413 ShifterImm = o.ShifterImm; 414 break; 415 case k_ShiftedRegister: 416 RegShiftedReg = o.RegShiftedReg; 417 break; 418 case k_ShiftedImmediate: 419 RegShiftedImm = o.RegShiftedImm; 420 break; 421 case k_RotateImmediate: 422 RotImm = o.RotImm; 423 break; 424 case k_BitfieldDescriptor: 425 Bitfield = o.Bitfield; 426 break; 427 case k_VectorIndex: 428 VectorIndex = 
o.VectorIndex; 429 break; 430 } 431 } 432 433 /// getStartLoc - Get the location of the first token of this operand. 434 SMLoc getStartLoc() const { return StartLoc; } 435 /// getEndLoc - Get the location of the last token of this operand. 436 SMLoc getEndLoc() const { return EndLoc; } 437 438 ARMCC::CondCodes getCondCode() const { 439 assert(Kind == k_CondCode && "Invalid access!"); 440 return CC.Val; 441 } 442 443 unsigned getCoproc() const { 444 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 445 return Cop.Val; 446 } 447 448 StringRef getToken() const { 449 assert(Kind == k_Token && "Invalid access!"); 450 return StringRef(Tok.Data, Tok.Length); 451 } 452 453 unsigned getReg() const { 454 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 455 return Reg.RegNum; 456 } 457 458 const SmallVectorImpl<unsigned> &getRegList() const { 459 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 460 Kind == k_SPRRegisterList) && "Invalid access!"); 461 return Registers; 462 } 463 464 const MCExpr *getImm() const { 465 assert(Kind == k_Immediate && "Invalid access!"); 466 return Imm.Val; 467 } 468 469 unsigned getFPImm() const { 470 assert(Kind == k_FPImmediate && "Invalid access!"); 471 return FPImm.Val; 472 } 473 474 unsigned getVectorIndex() const { 475 assert(Kind == k_VectorIndex && "Invalid access!"); 476 return VectorIndex.Val; 477 } 478 479 ARM_MB::MemBOpt getMemBarrierOpt() const { 480 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 481 return MBOpt.Val; 482 } 483 484 ARM_PROC::IFlags getProcIFlags() const { 485 assert(Kind == k_ProcIFlags && "Invalid access!"); 486 return IFlags.Val; 487 } 488 489 unsigned getMSRMask() const { 490 assert(Kind == k_MSRMask && "Invalid access!"); 491 return MMask.Val; 492 } 493 494 bool isCoprocNum() const { return Kind == k_CoprocNum; } 495 bool isCoprocReg() const { return Kind == k_CoprocReg; } 496 bool isCondCode() const { return Kind == k_CondCode; } 497 bool 
isCCOut() const { return Kind == k_CCOut; } 498 bool isITMask() const { return Kind == k_ITCondMask; } 499 bool isITCondCode() const { return Kind == k_CondCode; } 500 bool isImm() const { return Kind == k_Immediate; } 501 bool isFPImm() const { return Kind == k_FPImmediate; } 502 bool isImm8s4() const { 503 if (Kind != k_Immediate) 504 return false; 505 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 506 if (!CE) return false; 507 int64_t Value = CE->getValue(); 508 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 509 } 510 bool isImm0_1020s4() const { 511 if (Kind != k_Immediate) 512 return false; 513 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 514 if (!CE) return false; 515 int64_t Value = CE->getValue(); 516 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 517 } 518 bool isImm0_508s4() const { 519 if (Kind != k_Immediate) 520 return false; 521 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 522 if (!CE) return false; 523 int64_t Value = CE->getValue(); 524 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 525 } 526 bool isImm0_255() const { 527 if (Kind != k_Immediate) 528 return false; 529 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 530 if (!CE) return false; 531 int64_t Value = CE->getValue(); 532 return Value >= 0 && Value < 256; 533 } 534 bool isImm0_7() const { 535 if (Kind != k_Immediate) 536 return false; 537 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 538 if (!CE) return false; 539 int64_t Value = CE->getValue(); 540 return Value >= 0 && Value < 8; 541 } 542 bool isImm0_15() const { 543 if (Kind != k_Immediate) 544 return false; 545 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 546 if (!CE) return false; 547 int64_t Value = CE->getValue(); 548 return Value >= 0 && Value < 16; 549 } 550 bool isImm0_31() const { 551 if (Kind != k_Immediate) 552 return false; 553 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 
554 if (!CE) return false; 555 int64_t Value = CE->getValue(); 556 return Value >= 0 && Value < 32; 557 } 558 bool isImm1_16() const { 559 if (Kind != k_Immediate) 560 return false; 561 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 562 if (!CE) return false; 563 int64_t Value = CE->getValue(); 564 return Value > 0 && Value < 17; 565 } 566 bool isImm1_32() const { 567 if (Kind != k_Immediate) 568 return false; 569 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 570 if (!CE) return false; 571 int64_t Value = CE->getValue(); 572 return Value > 0 && Value < 33; 573 } 574 bool isImm0_65535() const { 575 if (Kind != k_Immediate) 576 return false; 577 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 578 if (!CE) return false; 579 int64_t Value = CE->getValue(); 580 return Value >= 0 && Value < 65536; 581 } 582 bool isImm0_65535Expr() const { 583 if (Kind != k_Immediate) 584 return false; 585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 586 // If it's not a constant expression, it'll generate a fixup and be 587 // handled later. 
588 if (!CE) return true; 589 int64_t Value = CE->getValue(); 590 return Value >= 0 && Value < 65536; 591 } 592 bool isImm24bit() const { 593 if (Kind != k_Immediate) 594 return false; 595 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 596 if (!CE) return false; 597 int64_t Value = CE->getValue(); 598 return Value >= 0 && Value <= 0xffffff; 599 } 600 bool isImmThumbSR() const { 601 if (Kind != k_Immediate) 602 return false; 603 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 604 if (!CE) return false; 605 int64_t Value = CE->getValue(); 606 return Value > 0 && Value < 33; 607 } 608 bool isPKHLSLImm() const { 609 if (Kind != k_Immediate) 610 return false; 611 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 612 if (!CE) return false; 613 int64_t Value = CE->getValue(); 614 return Value >= 0 && Value < 32; 615 } 616 bool isPKHASRImm() const { 617 if (Kind != k_Immediate) 618 return false; 619 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 620 if (!CE) return false; 621 int64_t Value = CE->getValue(); 622 return Value > 0 && Value <= 32; 623 } 624 bool isARMSOImm() const { 625 if (Kind != k_Immediate) 626 return false; 627 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 628 if (!CE) return false; 629 int64_t Value = CE->getValue(); 630 return ARM_AM::getSOImmVal(Value) != -1; 631 } 632 bool isT2SOImm() const { 633 if (Kind != k_Immediate) 634 return false; 635 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 636 if (!CE) return false; 637 int64_t Value = CE->getValue(); 638 return ARM_AM::getT2SOImmVal(Value) != -1; 639 } 640 bool isSetEndImm() const { 641 if (Kind != k_Immediate) 642 return false; 643 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 644 if (!CE) return false; 645 int64_t Value = CE->getValue(); 646 return Value == 1 || Value == 0; 647 } 648 bool isReg() const { return Kind == k_Register; } 649 bool isRegList() const { return Kind == k_RegisterList; 
} 650 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 651 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 652 bool isToken() const { return Kind == k_Token; } 653 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 654 bool isMemory() const { return Kind == k_Memory; } 655 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 656 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 657 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 658 bool isRotImm() const { return Kind == k_RotateImmediate; } 659 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 660 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 661 bool isPostIdxReg() const { 662 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift; 663 } 664 bool isMemNoOffset() const { 665 if (Kind != k_Memory) 666 return false; 667 // No offset of any kind. 668 return Mem.OffsetRegNum == 0 && Mem.OffsetImm == 0; 669 } 670 bool isAddrMode2() const { 671 if (Kind != k_Memory) 672 return false; 673 // Check for register offset. 674 if (Mem.OffsetRegNum) return true; 675 // Immediate offset in range [-4095, 4095]. 676 if (!Mem.OffsetImm) return true; 677 int64_t Val = Mem.OffsetImm->getValue(); 678 return Val > -4096 && Val < 4096; 679 } 680 bool isAM2OffsetImm() const { 681 if (Kind != k_Immediate) 682 return false; 683 // Immediate offset in range [-4095, 4095]. 684 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 685 if (!CE) return false; 686 int64_t Val = CE->getValue(); 687 return Val > -4096 && Val < 4096; 688 } 689 bool isAddrMode3() const { 690 if (Kind != k_Memory) 691 return false; 692 // No shifts are legal for AM3. 693 if (Mem.ShiftType != ARM_AM::no_shift) return false; 694 // Check for register offset. 695 if (Mem.OffsetRegNum) return true; 696 // Immediate offset in range [-255, 255]. 
697 if (!Mem.OffsetImm) return true; 698 int64_t Val = Mem.OffsetImm->getValue(); 699 return Val > -256 && Val < 256; 700 } 701 bool isAM3Offset() const { 702 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 703 return false; 704 if (Kind == k_PostIndexRegister) 705 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 706 // Immediate offset in range [-255, 255]. 707 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 708 if (!CE) return false; 709 int64_t Val = CE->getValue(); 710 // Special case, #-0 is INT32_MIN. 711 return (Val > -256 && Val < 256) || Val == INT32_MIN; 712 } 713 bool isAddrMode5() const { 714 if (Kind != k_Memory) 715 return false; 716 // Check for register offset. 717 if (Mem.OffsetRegNum) return false; 718 // Immediate offset in range [-1020, 1020] and a multiple of 4. 719 if (!Mem.OffsetImm) return true; 720 int64_t Val = Mem.OffsetImm->getValue(); 721 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 722 Val == INT32_MIN; 723 } 724 bool isMemTBB() const { 725 if (Kind != k_Memory || !Mem.OffsetRegNum || Mem.isNegative || 726 Mem.ShiftType != ARM_AM::no_shift) 727 return false; 728 return true; 729 } 730 bool isMemTBH() const { 731 if (Kind != k_Memory || !Mem.OffsetRegNum || Mem.isNegative || 732 Mem.ShiftType != ARM_AM::lsl || Mem.ShiftImm != 1) 733 return false; 734 return true; 735 } 736 bool isMemRegOffset() const { 737 if (Kind != k_Memory || !Mem.OffsetRegNum) 738 return false; 739 return true; 740 } 741 bool isT2MemRegOffset() const { 742 if (Kind != k_Memory || !Mem.OffsetRegNum || Mem.isNegative) 743 return false; 744 // Only lsl #{0, 1, 2, 3} allowed. 745 if (Mem.ShiftType == ARM_AM::no_shift) 746 return true; 747 if (Mem.ShiftType != ARM_AM::lsl || Mem.ShiftImm > 3) 748 return false; 749 return true; 750 } 751 bool isMemThumbRR() const { 752 // Thumb reg+reg addressing is simple. Just two registers, a base and 753 // an offset. No shifts, negations or any other complicating factors. 
754 if (Kind != k_Memory || !Mem.OffsetRegNum || Mem.isNegative || 755 Mem.ShiftType != ARM_AM::no_shift) 756 return false; 757 return isARMLowRegister(Mem.BaseRegNum) && 758 (!Mem.OffsetRegNum || isARMLowRegister(Mem.OffsetRegNum)); 759 } 760 bool isMemThumbRIs4() const { 761 if (Kind != k_Memory || Mem.OffsetRegNum != 0 || 762 !isARMLowRegister(Mem.BaseRegNum)) 763 return false; 764 // Immediate offset, multiple of 4 in range [0, 124]. 765 if (!Mem.OffsetImm) return true; 766 int64_t Val = Mem.OffsetImm->getValue(); 767 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 768 } 769 bool isMemThumbRIs2() const { 770 if (Kind != k_Memory || Mem.OffsetRegNum != 0 || 771 !isARMLowRegister(Mem.BaseRegNum)) 772 return false; 773 // Immediate offset, multiple of 4 in range [0, 62]. 774 if (!Mem.OffsetImm) return true; 775 int64_t Val = Mem.OffsetImm->getValue(); 776 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 777 } 778 bool isMemThumbRIs1() const { 779 if (Kind != k_Memory || Mem.OffsetRegNum != 0 || 780 !isARMLowRegister(Mem.BaseRegNum)) 781 return false; 782 // Immediate offset in range [0, 31]. 783 if (!Mem.OffsetImm) return true; 784 int64_t Val = Mem.OffsetImm->getValue(); 785 return Val >= 0 && Val <= 31; 786 } 787 bool isMemThumbSPI() const { 788 if (Kind != k_Memory || Mem.OffsetRegNum != 0 || Mem.BaseRegNum != ARM::SP) 789 return false; 790 // Immediate offset, multiple of 4 in range [0, 1020]. 791 if (!Mem.OffsetImm) return true; 792 int64_t Val = Mem.OffsetImm->getValue(); 793 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 794 } 795 bool isMemImm8s4Offset() const { 796 if (Kind != k_Memory || Mem.OffsetRegNum != 0) 797 return false; 798 // Immediate offset a multiple of 4 in range [-1020, 1020]. 
799 if (!Mem.OffsetImm) return true; 800 int64_t Val = Mem.OffsetImm->getValue(); 801 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 802 } 803 bool isMemImm0_1020s4Offset() const { 804 if (Kind != k_Memory || Mem.OffsetRegNum != 0) 805 return false; 806 // Immediate offset a multiple of 4 in range [0, 1020]. 807 if (!Mem.OffsetImm) return true; 808 int64_t Val = Mem.OffsetImm->getValue(); 809 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 810 } 811 bool isMemImm8Offset() const { 812 if (Kind != k_Memory || Mem.OffsetRegNum != 0) 813 return false; 814 // Immediate offset in range [-255, 255]. 815 if (!Mem.OffsetImm) return true; 816 int64_t Val = Mem.OffsetImm->getValue(); 817 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 818 } 819 bool isMemPosImm8Offset() const { 820 if (Kind != k_Memory || Mem.OffsetRegNum != 0) 821 return false; 822 // Immediate offset in range [0, 255]. 823 if (!Mem.OffsetImm) return true; 824 int64_t Val = Mem.OffsetImm->getValue(); 825 return Val >= 0 && Val < 256; 826 } 827 bool isMemNegImm8Offset() const { 828 if (Kind != k_Memory || Mem.OffsetRegNum != 0) 829 return false; 830 // Immediate offset in range [-255, -1]. 831 if (!Mem.OffsetImm) return true; 832 int64_t Val = Mem.OffsetImm->getValue(); 833 return Val > -256 && Val < 0; 834 } 835 bool isMemUImm12Offset() const { 836 // If we have an immediate that's not a constant, treat it as a label 837 // reference needing a fixup. If it is a constant, it's something else 838 // and we reject it. 839 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 840 return true; 841 842 if (Kind != k_Memory || Mem.OffsetRegNum != 0) 843 return false; 844 // Immediate offset in range [0, 4095]. 845 if (!Mem.OffsetImm) return true; 846 int64_t Val = Mem.OffsetImm->getValue(); 847 return (Val >= 0 && Val < 4096); 848 } 849 bool isMemImm12Offset() const { 850 // If we have an immediate that's not a constant, treat it as a label 851 // reference needing a fixup. 
If it is a constant, it's something else 852 // and we reject it. 853 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 854 return true; 855 856 if (Kind != k_Memory || Mem.OffsetRegNum != 0) 857 return false; 858 // Immediate offset in range [-4095, 4095]. 859 if (!Mem.OffsetImm) return true; 860 int64_t Val = Mem.OffsetImm->getValue(); 861 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 862 } 863 bool isPostIdxImm8() const { 864 if (Kind != k_Immediate) 865 return false; 866 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 867 if (!CE) return false; 868 int64_t Val = CE->getValue(); 869 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 870 } 871 872 bool isMSRMask() const { return Kind == k_MSRMask; } 873 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 874 875 bool isVectorIndex8() const { 876 if (Kind != k_VectorIndex) return false; 877 return VectorIndex.Val < 8; 878 } 879 bool isVectorIndex16() const { 880 if (Kind != k_VectorIndex) return false; 881 return VectorIndex.Val < 4; 882 } 883 bool isVectorIndex32() const { 884 if (Kind != k_VectorIndex) return false; 885 return VectorIndex.Val < 2; 886 } 887 888 889 890 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 891 // Add as immediates when possible. Null MCExpr = 0. 892 if (Expr == 0) 893 Inst.addOperand(MCOperand::CreateImm(0)); 894 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 895 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 896 else 897 Inst.addOperand(MCOperand::CreateExpr(Expr)); 898 } 899 900 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 901 assert(N == 2 && "Invalid number of operands!"); 902 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 903 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  // The add*Operands() methods below append this parsed operand's value(s)
  // to an MCInst being built by the matcher. N is the number of MCInst
  // operands the operand class expands to; each method asserts it matches.

  // Coprocessor number (p0..p15) as a plain immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  // IT-block condition mask, stored as the raw 4-bit mask.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  // Coprocessor register (c0..c15) as a plain immediate.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  // Optional CPSR-out register (CPSR for 's'-suffixed, 0 otherwise).
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register shifted by a register: source reg, shift reg, and a packed
  // shift-opcode/amount immediate built by getSORegOpc().
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by an immediate: source reg plus a packed
  // shift-opcode/amount immediate.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy,
                          RegShiftedImm.ShiftImm)));
  }

  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Bit 5 distinguishes asr (1) from lsl (0); the low bits hold the amount.
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // A register list is one parsed operand but expands to one MCInst
    // register operand per register in the (pre-sorted) list.
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    // NOTE(review): dyn_cast result dereferenced unchecked here and in the
    // sibling addImm* methods below — presumably the isImm* predicates
    // guarantee a constant by this point; verify against the matcher.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm24bitOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  // Bare [Rn] memory form: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
  }

  // Addressing mode 2: base reg, offset reg (0 if none), and a packed
  // AM2 opcode immediate (add/sub flag, offset or shift info).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() : 0;
    if (!Mem.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Mem.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Mem.ShiftImm, Mem.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    // Immediate-only offset: the offset register slot is register 0.
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Addressing mode 3: same three-operand shape as AM2 but with the AM3
  // packed opcode (no shifted register offsets).
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() : 0;
    if (!Mem.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Mem.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed AM3 offset: either a post-index register or a constant.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Mem.OffsetImm ? Mem.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val = ARM_AM::getAM2Opc(Mem.isNegative ? ARM_AM::sub : ARM_AM::add,
                                     Mem.ShiftImm, Mem.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Mem.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
  }

  // Thumb reg+imm forms: the offset is stored pre-scaled by the access size.
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Mem.OffsetImm ? (Mem.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Mem.OffsetImm ? (Mem.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Mem.OffsetImm ? (Mem.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Mem.OffsetImm ? (Mem.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Magnitude in the low 8 bits, add/sub flag in bit 8.
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  virtual void print(raw_ostream &OS) const;

  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // The Create*() factories below heap-allocate an ARMOperand of the
  // matching kind, fill in its payload, and record the source range.
  // Single-token operands use the same location for start and end.

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Classify the list as DPR/SPR/core-reg from its first register, then
  // store the registers sorted (matcher expects ascending order).
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  // NOTE(review): the Ctx parameter is unused here (and in CreateFPImm
  // below) — presumably kept for signature uniformity; confirm.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Mem.BaseRegNum = BaseRegNum;
    Op->Mem.OffsetImm = OffsetImm;
    Op->Mem.OffsetRegNum = OffsetRegNum;
    Op->Mem.ShiftType = ShiftType;
    Op->Mem.ShiftImm = ShiftImm;
    Op->Mem.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

// Debug dump of a parsed operand, one case per operand kind.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Table indexed directly by the 4-bit IT mask value.
    static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)",
      "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)",
      "(tee)", "(eee)" };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
case k_Memory:
    OS << "<memory "
       << " base:" << Mem.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print each set flag bit, highest first.
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm))
       << ", " << RegShiftedReg.ShiftReg << ", "
       << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm)
       << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm))
       << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm)
       << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      // Comma-separate; iterators are pointers here, so '<' is valid.
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// Target hook: parse a register, returning true on failure.
/// NOTE(review): StartLoc/EndLoc are never written by this implementation —
/// callers receive them unset; confirm whether that is intentional.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  // tryParseRegister() returns -1 on failure; it wraps to (unsigned)-1 here.
  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Accept architectural aliases the tablegen matcher doesn't know.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

#if 0
  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = Parser.getTok().getLoc();
    if (getParser().ParseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }
#endif
  // NOTE(review): the #if 0 block above references 'Operands' and
  // MatchOperand_ParseFail, which are not in scope in this function —
  // it is dead code that would not compile if enabled.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  // NOTE(review): Error() returns bool, so this returns 1 ("recoverable")
  // even though the shift token was consumed and the previous operand has
  // been popped and will be destroyed — per this function's own contract
  // that looks like it should be -1; confirm intended behavior.
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit a shifted-register operand when a shift register was parsed,
  // otherwise a shifted-immediate operand (covers rrx and #imm forms).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
1892bool ARMAsmParser:: 1893tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 1894 SMLoc S = Parser.getTok().getLoc(); 1895 int RegNo = tryParseRegister(); 1896 if (RegNo == -1) 1897 return true; 1898 1899 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 1900 1901 const AsmToken &ExclaimTok = Parser.getTok(); 1902 if (ExclaimTok.is(AsmToken::Exclaim)) { 1903 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 1904 ExclaimTok.getLoc())); 1905 Parser.Lex(); // Eat exclaim token 1906 return false; 1907 } 1908 1909 // Also check for an index operand. This is only legal for vector registers, 1910 // but that'll get caught OK in operand matching, so we don't need to 1911 // explicitly filter everything else out here. 1912 if (Parser.getTok().is(AsmToken::LBrac)) { 1913 SMLoc SIdx = Parser.getTok().getLoc(); 1914 Parser.Lex(); // Eat left bracket token. 1915 1916 const MCExpr *ImmVal; 1917 SMLoc ExprLoc = Parser.getTok().getLoc(); 1918 if (getParser().ParseExpression(ImmVal)) 1919 return MatchOperand_ParseFail; 1920 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 1921 if (!MCE) { 1922 TokError("immediate value expected for vector index"); 1923 return MatchOperand_ParseFail; 1924 } 1925 1926 SMLoc E = Parser.getTok().getLoc(); 1927 if (Parser.getTok().isNot(AsmToken::RBrac)) { 1928 Error(E, "']' expected"); 1929 return MatchOperand_ParseFail; 1930 } 1931 1932 Parser.Lex(); // Eat right bracket token. 1933 1934 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 1935 SIdx, E, 1936 getContext())); 1937 } 1938 1939 return false; 1940} 1941 1942/// MatchCoprocessorOperandName - Try to parse an coprocessor related 1943/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 1944/// "c5", ... 1945static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 1946 // Use the same layout as the tablegen'erated register name matcher. 
Ugly, 1947 // but efficient. 1948 switch (Name.size()) { 1949 default: break; 1950 case 2: 1951 if (Name[0] != CoprocOp) 1952 return -1; 1953 switch (Name[1]) { 1954 default: return -1; 1955 case '0': return 0; 1956 case '1': return 1; 1957 case '2': return 2; 1958 case '3': return 3; 1959 case '4': return 4; 1960 case '5': return 5; 1961 case '6': return 6; 1962 case '7': return 7; 1963 case '8': return 8; 1964 case '9': return 9; 1965 } 1966 break; 1967 case 3: 1968 if (Name[0] != CoprocOp || Name[1] != '1') 1969 return -1; 1970 switch (Name[2]) { 1971 default: return -1; 1972 case '0': return 10; 1973 case '1': return 11; 1974 case '2': return 12; 1975 case '3': return 13; 1976 case '4': return 14; 1977 case '5': return 15; 1978 } 1979 break; 1980 } 1981 1982 return -1; 1983} 1984 1985/// parseITCondCode - Try to parse a condition code for an IT instruction. 1986ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 1987parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 1988 SMLoc S = Parser.getTok().getLoc(); 1989 const AsmToken &Tok = Parser.getTok(); 1990 if (!Tok.is(AsmToken::Identifier)) 1991 return MatchOperand_NoMatch; 1992 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 1993 .Case("eq", ARMCC::EQ) 1994 .Case("ne", ARMCC::NE) 1995 .Case("hs", ARMCC::HS) 1996 .Case("cs", ARMCC::HS) 1997 .Case("lo", ARMCC::LO) 1998 .Case("cc", ARMCC::LO) 1999 .Case("mi", ARMCC::MI) 2000 .Case("pl", ARMCC::PL) 2001 .Case("vs", ARMCC::VS) 2002 .Case("vc", ARMCC::VC) 2003 .Case("hi", ARMCC::HI) 2004 .Case("ls", ARMCC::LS) 2005 .Case("ge", ARMCC::GE) 2006 .Case("lt", ARMCC::LT) 2007 .Case("gt", ARMCC::GT) 2008 .Case("le", ARMCC::LE) 2009 .Case("al", ARMCC::AL) 2010 .Default(~0U); 2011 if (CC == ~0U) 2012 return MatchOperand_NoMatch; 2013 Parser.Lex(); // Eat the token. 
2014 2015 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2016 2017 return MatchOperand_Success; 2018} 2019 2020/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The 2021/// token must be an Identifier when called, and if it is a coprocessor 2022/// number, the token is eaten and the operand is added to the operand list. 2023ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2024parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2025 SMLoc S = Parser.getTok().getLoc(); 2026 const AsmToken &Tok = Parser.getTok(); 2027 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2028 2029 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2030 if (Num == -1) 2031 return MatchOperand_NoMatch; 2032 2033 Parser.Lex(); // Eat identifier token. 2034 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2035 return MatchOperand_Success; 2036} 2037 2038/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2039/// token must be an Identifier when called, and if it is a coprocessor 2040/// number, the token is eaten and the operand is added to the operand list. 2041ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2042parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2043 SMLoc S = Parser.getTok().getLoc(); 2044 const AsmToken &Tok = Parser.getTok(); 2045 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2046 2047 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2048 if (Reg == -1) 2049 return MatchOperand_NoMatch; 2050 2051 Parser.Lex(); // Eat identifier token. 2052 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2053 return MatchOperand_Success; 2054} 2055 2056// For register list parsing, we need to map from raw GPR register numbering 2057// to the enumeration values. The enumeration values aren't sorted by 2058// register number due to our using "sp", "lr" and "pc" as canonical names. 
static unsigned getNextRegister(unsigned Reg) {
  // If this is a GPR, we need to do it manually, otherwise we can rely
  // on the sort ordering of the enumeration since the other reg-classes
  // are sane.
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  // Wrap through the "special" names: r12 -> sp -> lr -> pc -> r0,
  // matching raw ARM register numbering rather than enum order.
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

/// Parse a register list, e.g. "{r0, r4-r7, lr}". The opening '{' must be
/// the current token. Registers may be given singly or as low-high ranges;
/// all must belong to the register class of the first register seen
/// (GPR, DPR or SPR). On success a RegList operand is appended and false
/// is returned; on failure an error is emitted and true is returned.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
  // Store the first register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      // Each intermediate register reuses RegLoc (the start of the range)
      // as its location.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
  return false;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
2169ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2170parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2171 SMLoc S = Parser.getTok().getLoc(); 2172 const AsmToken &Tok = Parser.getTok(); 2173 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2174 StringRef OptStr = Tok.getString(); 2175 2176 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2177 .Case("sy", ARM_MB::SY) 2178 .Case("st", ARM_MB::ST) 2179 .Case("sh", ARM_MB::ISH) 2180 .Case("ish", ARM_MB::ISH) 2181 .Case("shst", ARM_MB::ISHST) 2182 .Case("ishst", ARM_MB::ISHST) 2183 .Case("nsh", ARM_MB::NSH) 2184 .Case("un", ARM_MB::NSH) 2185 .Case("nshst", ARM_MB::NSHST) 2186 .Case("unst", ARM_MB::NSHST) 2187 .Case("osh", ARM_MB::OSH) 2188 .Case("oshst", ARM_MB::OSHST) 2189 .Default(~0U); 2190 2191 if (Opt == ~0U) 2192 return MatchOperand_NoMatch; 2193 2194 Parser.Lex(); // Eat identifier token. 2195 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2196 return MatchOperand_Success; 2197} 2198 2199/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2200ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2201parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2202 SMLoc S = Parser.getTok().getLoc(); 2203 const AsmToken &Tok = Parser.getTok(); 2204 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2205 StringRef IFlagsStr = Tok.getString(); 2206 2207 // An iflags string of "none" is interpreted to mean that none of the AIF 2208 // bits are set. Not a terribly useful instruction, but a valid encoding. 
2209 unsigned IFlags = 0; 2210 if (IFlagsStr != "none") { 2211 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2212 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2213 .Case("a", ARM_PROC::A) 2214 .Case("i", ARM_PROC::I) 2215 .Case("f", ARM_PROC::F) 2216 .Default(~0U); 2217 2218 // If some specific iflag is already set, it means that some letter is 2219 // present more than once, this is not acceptable. 2220 if (Flag == ~0U || (IFlags & Flag)) 2221 return MatchOperand_NoMatch; 2222 2223 IFlags |= Flag; 2224 } 2225 } 2226 2227 Parser.Lex(); // Eat identifier token. 2228 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2229 return MatchOperand_Success; 2230} 2231 2232/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 2233ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2234parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2235 SMLoc S = Parser.getTok().getLoc(); 2236 const AsmToken &Tok = Parser.getTok(); 2237 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2238 StringRef Mask = Tok.getString(); 2239 2240 if (isMClass()) { 2241 // See ARMv6-M 10.1.1 2242 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2243 .Case("apsr", 0) 2244 .Case("iapsr", 1) 2245 .Case("eapsr", 2) 2246 .Case("xpsr", 3) 2247 .Case("ipsr", 5) 2248 .Case("epsr", 6) 2249 .Case("iepsr", 7) 2250 .Case("msp", 8) 2251 .Case("psp", 9) 2252 .Case("primask", 16) 2253 .Case("basepri", 17) 2254 .Case("basepri_max", 18) 2255 .Case("faultmask", 19) 2256 .Case("control", 20) 2257 .Default(~0U); 2258 2259 if (FlagsVal == ~0U) 2260 return MatchOperand_NoMatch; 2261 2262 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2263 // basepri, basepri_max and faultmask only valid for V7m. 2264 return MatchOperand_NoMatch; 2265 2266 Parser.Lex(); // Eat identifier token. 
2267 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2268 return MatchOperand_Success; 2269 } 2270 2271 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2272 size_t Start = 0, Next = Mask.find('_'); 2273 StringRef Flags = ""; 2274 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2275 if (Next != StringRef::npos) 2276 Flags = Mask.slice(Next+1, Mask.size()); 2277 2278 // FlagsVal contains the complete mask: 2279 // 3-0: Mask 2280 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2281 unsigned FlagsVal = 0; 2282 2283 if (SpecReg == "apsr") { 2284 FlagsVal = StringSwitch<unsigned>(Flags) 2285 .Case("nzcvq", 0x8) // same as CPSR_f 2286 .Case("g", 0x4) // same as CPSR_s 2287 .Case("nzcvqg", 0xc) // same as CPSR_fs 2288 .Default(~0U); 2289 2290 if (FlagsVal == ~0U) { 2291 if (!Flags.empty()) 2292 return MatchOperand_NoMatch; 2293 else 2294 FlagsVal = 8; // No flag 2295 } 2296 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2297 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2298 Flags = "fc"; 2299 for (int i = 0, e = Flags.size(); i != e; ++i) { 2300 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2301 .Case("c", 1) 2302 .Case("x", 2) 2303 .Case("s", 4) 2304 .Case("f", 8) 2305 .Default(~0U); 2306 2307 // If some specific flag is already set, it means that some letter is 2308 // present more than once, this is not acceptable. 2309 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2310 return MatchOperand_NoMatch; 2311 FlagsVal |= Flag; 2312 } 2313 } else // No match for special register. 2314 return MatchOperand_NoMatch; 2315 2316 // Special register without flags are equivalent to "fc" flags. 2317 if (!FlagsVal) 2318 FlagsVal = 0x9; 2319 2320 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2321 if (SpecReg == "spsr") 2322 FlagsVal |= 16; 2323 2324 Parser.Lex(); // Eat identifier token. 
2325 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2326 return MatchOperand_Success; 2327} 2328 2329ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2330parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2331 int Low, int High) { 2332 const AsmToken &Tok = Parser.getTok(); 2333 if (Tok.isNot(AsmToken::Identifier)) { 2334 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2335 return MatchOperand_ParseFail; 2336 } 2337 StringRef ShiftName = Tok.getString(); 2338 std::string LowerOp = LowercaseString(Op); 2339 std::string UpperOp = UppercaseString(Op); 2340 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2341 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2342 return MatchOperand_ParseFail; 2343 } 2344 Parser.Lex(); // Eat shift type token. 2345 2346 // There must be a '#' and a shift amount. 2347 if (Parser.getTok().isNot(AsmToken::Hash)) { 2348 Error(Parser.getTok().getLoc(), "'#' expected"); 2349 return MatchOperand_ParseFail; 2350 } 2351 Parser.Lex(); // Eat hash token. 
2352 2353 const MCExpr *ShiftAmount; 2354 SMLoc Loc = Parser.getTok().getLoc(); 2355 if (getParser().ParseExpression(ShiftAmount)) { 2356 Error(Loc, "illegal expression"); 2357 return MatchOperand_ParseFail; 2358 } 2359 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2360 if (!CE) { 2361 Error(Loc, "constant expression expected"); 2362 return MatchOperand_ParseFail; 2363 } 2364 int Val = CE->getValue(); 2365 if (Val < Low || Val > High) { 2366 Error(Loc, "immediate value out of range"); 2367 return MatchOperand_ParseFail; 2368 } 2369 2370 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2371 2372 return MatchOperand_Success; 2373} 2374 2375ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2376parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2377 const AsmToken &Tok = Parser.getTok(); 2378 SMLoc S = Tok.getLoc(); 2379 if (Tok.isNot(AsmToken::Identifier)) { 2380 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2381 return MatchOperand_ParseFail; 2382 } 2383 int Val = StringSwitch<int>(Tok.getString()) 2384 .Case("be", 1) 2385 .Case("le", 0) 2386 .Default(-1); 2387 Parser.Lex(); // Eat the token. 2388 2389 if (Val == -1) { 2390 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2391 return MatchOperand_ParseFail; 2392 } 2393 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2394 getContext()), 2395 S, Parser.getTok().getLoc())); 2396 return MatchOperand_Success; 2397} 2398 2399/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2400/// instructions. Legal values are: 2401/// lsl #n 'n' in [0,31] 2402/// asr #n 'n' in [1,32] 2403/// n == 32 encoded as n == 0. 
2404ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2405parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2406 const AsmToken &Tok = Parser.getTok(); 2407 SMLoc S = Tok.getLoc(); 2408 if (Tok.isNot(AsmToken::Identifier)) { 2409 Error(S, "shift operator 'asr' or 'lsl' expected"); 2410 return MatchOperand_ParseFail; 2411 } 2412 StringRef ShiftName = Tok.getString(); 2413 bool isASR; 2414 if (ShiftName == "lsl" || ShiftName == "LSL") 2415 isASR = false; 2416 else if (ShiftName == "asr" || ShiftName == "ASR") 2417 isASR = true; 2418 else { 2419 Error(S, "shift operator 'asr' or 'lsl' expected"); 2420 return MatchOperand_ParseFail; 2421 } 2422 Parser.Lex(); // Eat the operator. 2423 2424 // A '#' and a shift amount. 2425 if (Parser.getTok().isNot(AsmToken::Hash)) { 2426 Error(Parser.getTok().getLoc(), "'#' expected"); 2427 return MatchOperand_ParseFail; 2428 } 2429 Parser.Lex(); // Eat hash token. 2430 2431 const MCExpr *ShiftAmount; 2432 SMLoc E = Parser.getTok().getLoc(); 2433 if (getParser().ParseExpression(ShiftAmount)) { 2434 Error(E, "malformed shift expression"); 2435 return MatchOperand_ParseFail; 2436 } 2437 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2438 if (!CE) { 2439 Error(E, "shift amount must be an immediate"); 2440 return MatchOperand_ParseFail; 2441 } 2442 2443 int64_t Val = CE->getValue(); 2444 if (isASR) { 2445 // Shift amount must be in [1,32] 2446 if (Val < 1 || Val > 32) { 2447 Error(E, "'asr' shift amount must be in range [1,32]"); 2448 return MatchOperand_ParseFail; 2449 } 2450 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2451 if (isThumb() && Val == 32) { 2452 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2453 return MatchOperand_ParseFail; 2454 } 2455 if (Val == 32) Val = 0; 2456 } else { 2457 // Shift amount must be in [1,32] 2458 if (Val < 0 || Val > 31) { 2459 Error(E, "'lsr' shift amount must be in range [0,31]"); 2460 return MatchOperand_ParseFail; 2461 } 2462 } 2463 2464 E = Parser.getTok().getLoc(); 2465 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2466 2467 return MatchOperand_Success; 2468} 2469 2470/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2471/// of instructions. Legal values are: 2472/// ror #n 'n' in {0, 8, 16, 24} 2473ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2474parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2475 const AsmToken &Tok = Parser.getTok(); 2476 SMLoc S = Tok.getLoc(); 2477 if (Tok.isNot(AsmToken::Identifier)) 2478 return MatchOperand_NoMatch; 2479 StringRef ShiftName = Tok.getString(); 2480 if (ShiftName != "ror" && ShiftName != "ROR") 2481 return MatchOperand_NoMatch; 2482 Parser.Lex(); // Eat the operator. 2483 2484 // A '#' and a rotate amount. 2485 if (Parser.getTok().isNot(AsmToken::Hash)) { 2486 Error(Parser.getTok().getLoc(), "'#' expected"); 2487 return MatchOperand_ParseFail; 2488 } 2489 Parser.Lex(); // Eat hash token. 2490 2491 const MCExpr *ShiftAmount; 2492 SMLoc E = Parser.getTok().getLoc(); 2493 if (getParser().ParseExpression(ShiftAmount)) { 2494 Error(E, "malformed rotate expression"); 2495 return MatchOperand_ParseFail; 2496 } 2497 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2498 if (!CE) { 2499 Error(E, "rotate amount must be an immediate"); 2500 return MatchOperand_ParseFail; 2501 } 2502 2503 int64_t Val = CE->getValue(); 2504 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2505 // normally, zero is represented in asm by omitting the rotate operand 2506 // entirely. 
2507 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2508 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2509 return MatchOperand_ParseFail; 2510 } 2511 2512 E = Parser.getTok().getLoc(); 2513 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2514 2515 return MatchOperand_Success; 2516} 2517 2518ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2519parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2520 SMLoc S = Parser.getTok().getLoc(); 2521 // The bitfield descriptor is really two operands, the LSB and the width. 2522 if (Parser.getTok().isNot(AsmToken::Hash)) { 2523 Error(Parser.getTok().getLoc(), "'#' expected"); 2524 return MatchOperand_ParseFail; 2525 } 2526 Parser.Lex(); // Eat hash token. 2527 2528 const MCExpr *LSBExpr; 2529 SMLoc E = Parser.getTok().getLoc(); 2530 if (getParser().ParseExpression(LSBExpr)) { 2531 Error(E, "malformed immediate expression"); 2532 return MatchOperand_ParseFail; 2533 } 2534 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2535 if (!CE) { 2536 Error(E, "'lsb' operand must be an immediate"); 2537 return MatchOperand_ParseFail; 2538 } 2539 2540 int64_t LSB = CE->getValue(); 2541 // The LSB must be in the range [0,31] 2542 if (LSB < 0 || LSB > 31) { 2543 Error(E, "'lsb' operand must be in the range [0,31]"); 2544 return MatchOperand_ParseFail; 2545 } 2546 E = Parser.getTok().getLoc(); 2547 2548 // Expect another immediate operand. 2549 if (Parser.getTok().isNot(AsmToken::Comma)) { 2550 Error(Parser.getTok().getLoc(), "too few operands"); 2551 return MatchOperand_ParseFail; 2552 } 2553 Parser.Lex(); // Eat hash token. 2554 if (Parser.getTok().isNot(AsmToken::Hash)) { 2555 Error(Parser.getTok().getLoc(), "'#' expected"); 2556 return MatchOperand_ParseFail; 2557 } 2558 Parser.Lex(); // Eat hash token. 
2559 2560 const MCExpr *WidthExpr; 2561 if (getParser().ParseExpression(WidthExpr)) { 2562 Error(E, "malformed immediate expression"); 2563 return MatchOperand_ParseFail; 2564 } 2565 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2566 if (!CE) { 2567 Error(E, "'width' operand must be an immediate"); 2568 return MatchOperand_ParseFail; 2569 } 2570 2571 int64_t Width = CE->getValue(); 2572 // The LSB must be in the range [1,32-lsb] 2573 if (Width < 1 || Width > 32 - LSB) { 2574 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2575 return MatchOperand_ParseFail; 2576 } 2577 E = Parser.getTok().getLoc(); 2578 2579 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2580 2581 return MatchOperand_Success; 2582} 2583 2584ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2585parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2586 // Check for a post-index addressing register operand. Specifically: 2587 // postidx_reg := '+' register {, shift} 2588 // | '-' register {, shift} 2589 // | register {, shift} 2590 2591 // This method must return MatchOperand_NoMatch without consuming any tokens 2592 // in the case where there is no match, as other alternatives take other 2593 // parse methods. 2594 AsmToken Tok = Parser.getTok(); 2595 SMLoc S = Tok.getLoc(); 2596 bool haveEaten = false; 2597 bool isAdd = true; 2598 int Reg = -1; 2599 if (Tok.is(AsmToken::Plus)) { 2600 Parser.Lex(); // Eat the '+' token. 2601 haveEaten = true; 2602 } else if (Tok.is(AsmToken::Minus)) { 2603 Parser.Lex(); // Eat the '-' token. 
2604 isAdd = false; 2605 haveEaten = true; 2606 } 2607 if (Parser.getTok().is(AsmToken::Identifier)) 2608 Reg = tryParseRegister(); 2609 if (Reg == -1) { 2610 if (!haveEaten) 2611 return MatchOperand_NoMatch; 2612 Error(Parser.getTok().getLoc(), "register expected"); 2613 return MatchOperand_ParseFail; 2614 } 2615 SMLoc E = Parser.getTok().getLoc(); 2616 2617 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2618 unsigned ShiftImm = 0; 2619 if (Parser.getTok().is(AsmToken::Comma)) { 2620 Parser.Lex(); // Eat the ','. 2621 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2622 return MatchOperand_ParseFail; 2623 } 2624 2625 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2626 ShiftImm, S, E)); 2627 2628 return MatchOperand_Success; 2629} 2630 2631ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2632parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2633 // Check for a post-index addressing register operand. Specifically: 2634 // am3offset := '+' register 2635 // | '-' register 2636 // | register 2637 // | # imm 2638 // | # + imm 2639 // | # - imm 2640 2641 // This method must return MatchOperand_NoMatch without consuming any tokens 2642 // in the case where there is no match, as other alternatives take other 2643 // parse methods. 2644 AsmToken Tok = Parser.getTok(); 2645 SMLoc S = Tok.getLoc(); 2646 2647 // Do immediates first, as we always parse those if we have a '#'. 2648 if (Parser.getTok().is(AsmToken::Hash)) { 2649 Parser.Lex(); // Eat the '#'. 2650 // Explicitly look for a '-', as we need to encode negative zero 2651 // differently. 
2652 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2653 const MCExpr *Offset; 2654 if (getParser().ParseExpression(Offset)) 2655 return MatchOperand_ParseFail; 2656 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2657 if (!CE) { 2658 Error(S, "constant expression expected"); 2659 return MatchOperand_ParseFail; 2660 } 2661 SMLoc E = Tok.getLoc(); 2662 // Negative zero is encoded as the flag value INT32_MIN. 2663 int32_t Val = CE->getValue(); 2664 if (isNegative && Val == 0) 2665 Val = INT32_MIN; 2666 2667 Operands.push_back( 2668 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2669 2670 return MatchOperand_Success; 2671 } 2672 2673 2674 bool haveEaten = false; 2675 bool isAdd = true; 2676 int Reg = -1; 2677 if (Tok.is(AsmToken::Plus)) { 2678 Parser.Lex(); // Eat the '+' token. 2679 haveEaten = true; 2680 } else if (Tok.is(AsmToken::Minus)) { 2681 Parser.Lex(); // Eat the '-' token. 2682 isAdd = false; 2683 haveEaten = true; 2684 } 2685 if (Parser.getTok().is(AsmToken::Identifier)) 2686 Reg = tryParseRegister(); 2687 if (Reg == -1) { 2688 if (!haveEaten) 2689 return MatchOperand_NoMatch; 2690 Error(Parser.getTok().getLoc(), "register expected"); 2691 return MatchOperand_ParseFail; 2692 } 2693 SMLoc E = Parser.getTok().getLoc(); 2694 2695 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 2696 0, S, E)); 2697 2698 return MatchOperand_Success; 2699} 2700 2701/// cvtT2LdrdPre - Convert parsed operands to MCInst. 2702/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2703/// when they refer multiple MIOperands inside a single one. 2704bool ARMAsmParser:: 2705cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 2706 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2707 // Rt, Rt2 2708 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2709 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2710 // Create a writeback register dummy placeholder. 
2711 Inst.addOperand(MCOperand::CreateReg(0)); 2712 // addr 2713 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 2714 // pred 2715 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2716 return true; 2717} 2718 2719/// cvtT2StrdPre - Convert parsed operands to MCInst. 2720/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2721/// when they refer multiple MIOperands inside a single one. 2722bool ARMAsmParser:: 2723cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 2724 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2725 // Create a writeback register dummy placeholder. 2726 Inst.addOperand(MCOperand::CreateReg(0)); 2727 // Rt, Rt2 2728 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2729 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2730 // addr 2731 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 2732 // pred 2733 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2734 return true; 2735} 2736 2737/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 2738/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2739/// when they refer multiple MIOperands inside a single one. 2740bool ARMAsmParser:: 2741cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 2742 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2743 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2744 2745 // Create a writeback register dummy placeholder. 2746 Inst.addOperand(MCOperand::CreateImm(0)); 2747 2748 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 2749 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2750 return true; 2751} 2752 2753/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 2754/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2755/// when they refer multiple MIOperands inside a single one. 
2756bool ARMAsmParser:: 2757cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 2758 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2759 // Create a writeback register dummy placeholder. 2760 Inst.addOperand(MCOperand::CreateImm(0)); 2761 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2762 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 2763 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2764 return true; 2765} 2766 2767/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 2768/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2769/// when they refer multiple MIOperands inside a single one. 2770bool ARMAsmParser:: 2771cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 2772 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2773 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2774 2775 // Create a writeback register dummy placeholder. 2776 Inst.addOperand(MCOperand::CreateImm(0)); 2777 2778 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 2779 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2780 return true; 2781} 2782 2783/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 2784/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2785/// when they refer multiple MIOperands inside a single one. 2786bool ARMAsmParser:: 2787cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 2788 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2789 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2790 2791 // Create a writeback register dummy placeholder. 2792 Inst.addOperand(MCOperand::CreateImm(0)); 2793 2794 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 2795 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2796 return true; 2797} 2798 2799 2800/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
2801/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2802/// when they refer multiple MIOperands inside a single one. 2803bool ARMAsmParser:: 2804cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 2805 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2806 // Create a writeback register dummy placeholder. 2807 Inst.addOperand(MCOperand::CreateImm(0)); 2808 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2809 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 2810 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2811 return true; 2812} 2813 2814/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 2815/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2816/// when they refer multiple MIOperands inside a single one. 2817bool ARMAsmParser:: 2818cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 2819 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2820 // Create a writeback register dummy placeholder. 2821 Inst.addOperand(MCOperand::CreateImm(0)); 2822 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2823 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 2824 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2825 return true; 2826} 2827 2828/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 2829/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2830/// when they refer multiple MIOperands inside a single one. 2831bool ARMAsmParser:: 2832cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 2833 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2834 // Create a writeback register dummy placeholder. 
2835 Inst.addOperand(MCOperand::CreateImm(0)); 2836 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2837 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 2838 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2839 return true; 2840} 2841 2842/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 2843/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2844/// when they refer multiple MIOperands inside a single one. 2845bool ARMAsmParser:: 2846cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 2847 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2848 // Rt 2849 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2850 // Create a writeback register dummy placeholder. 2851 Inst.addOperand(MCOperand::CreateImm(0)); 2852 // addr 2853 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2854 // offset 2855 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 2856 // pred 2857 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2858 return true; 2859} 2860 2861/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 2862/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2863/// when they refer multiple MIOperands inside a single one. 2864bool ARMAsmParser:: 2865cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 2866 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2867 // Rt 2868 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2869 // Create a writeback register dummy placeholder. 2870 Inst.addOperand(MCOperand::CreateImm(0)); 2871 // addr 2872 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2873 // offset 2874 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 2875 // pred 2876 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2877 return true; 2878} 2879 2880/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
2881/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2882/// when they refer multiple MIOperands inside a single one. 2883bool ARMAsmParser:: 2884cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 2885 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2886 // Create a writeback register dummy placeholder. 2887 Inst.addOperand(MCOperand::CreateImm(0)); 2888 // Rt 2889 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2890 // addr 2891 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2892 // offset 2893 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 2894 // pred 2895 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2896 return true; 2897} 2898 2899/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 2900/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2901/// when they refer multiple MIOperands inside a single one. 2902bool ARMAsmParser:: 2903cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 2904 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2905 // Create a writeback register dummy placeholder. 2906 Inst.addOperand(MCOperand::CreateImm(0)); 2907 // Rt 2908 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2909 // addr 2910 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2911 // offset 2912 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 2913 // pred 2914 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2915 return true; 2916} 2917 2918/// cvtLdrdPre - Convert parsed operands to MCInst. 2919/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2920/// when they refer multiple MIOperands inside a single one. 
2921bool ARMAsmParser:: 2922cvtLdrdPre(MCInst &Inst, unsigned Opcode, 2923 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2924 // Rt, Rt2 2925 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2926 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2927 // Create a writeback register dummy placeholder. 2928 Inst.addOperand(MCOperand::CreateImm(0)); 2929 // addr 2930 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 2931 // pred 2932 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2933 return true; 2934} 2935 2936/// cvtStrdPre - Convert parsed operands to MCInst. 2937/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2938/// when they refer multiple MIOperands inside a single one. 2939bool ARMAsmParser:: 2940cvtStrdPre(MCInst &Inst, unsigned Opcode, 2941 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2942 // Create a writeback register dummy placeholder. 2943 Inst.addOperand(MCOperand::CreateImm(0)); 2944 // Rt, Rt2 2945 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2946 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2947 // addr 2948 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 2949 // pred 2950 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2951 return true; 2952} 2953 2954/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 2955/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2956/// when they refer multiple MIOperands inside a single one. 2957bool ARMAsmParser:: 2958cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 2959 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2960 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2961 // Create a writeback register dummy placeholder. 
2962 Inst.addOperand(MCOperand::CreateImm(0)); 2963 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 2964 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2965 return true; 2966} 2967 2968/// cvtThumbMultiple- Convert parsed operands to MCInst. 2969/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2970/// when they refer multiple MIOperands inside a single one. 2971bool ARMAsmParser:: 2972cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 2973 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2974 // The second source operand must be the same register as the destination 2975 // operand. 2976 if (Operands.size() == 6 && 2977 (((ARMOperand*)Operands[3])->getReg() != 2978 ((ARMOperand*)Operands[5])->getReg()) && 2979 (((ARMOperand*)Operands[3])->getReg() != 2980 ((ARMOperand*)Operands[4])->getReg())) { 2981 Error(Operands[3]->getStartLoc(), 2982 "destination register must match source register"); 2983 return false; 2984 } 2985 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2986 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 2987 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 2988 // If we have a three-operand form, use that, else the second source operand 2989 // is just the destination operand again. 2990 if (Operands.size() == 6) 2991 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 2992 else 2993 Inst.addOperand(Inst.getOperand(0)); 2994 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 2995 2996 return true; 2997} 2998 2999/// Parse an ARM memory expression, return false if successful else return true 3000/// or an error. The first token must be a '[' when called. 3001bool ARMAsmParser:: 3002parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3003 SMLoc S, E; 3004 assert(Parser.getTok().is(AsmToken::LBrac) && 3005 "Token is not a Left Bracket"); 3006 S = Parser.getTok().getLoc(); 3007 Parser.Lex(); // Eat left bracket token. 
3008 3009 const AsmToken &BaseRegTok = Parser.getTok(); 3010 int BaseRegNum = tryParseRegister(); 3011 if (BaseRegNum == -1) 3012 return Error(BaseRegTok.getLoc(), "register expected"); 3013 3014 // The next token must either be a comma or a closing bracket. 3015 const AsmToken &Tok = Parser.getTok(); 3016 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3017 return Error(Tok.getLoc(), "malformed memory operand"); 3018 3019 if (Tok.is(AsmToken::RBrac)) { 3020 E = Tok.getLoc(); 3021 Parser.Lex(); // Eat right bracket token. 3022 3023 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3024 0, false, S, E)); 3025 3026 // If there's a pre-indexing writeback marker, '!', just add it as a token 3027 // operand. It's rather odd, but syntactically valid. 3028 if (Parser.getTok().is(AsmToken::Exclaim)) { 3029 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3030 Parser.Lex(); // Eat the '!'. 3031 } 3032 3033 return false; 3034 } 3035 3036 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3037 Parser.Lex(); // Eat the comma. 3038 3039 // If we have a '#' it's an immediate offset, else assume it's a register 3040 // offset. 3041 if (Parser.getTok().is(AsmToken::Hash)) { 3042 Parser.Lex(); // Eat the '#'. 3043 E = Parser.getTok().getLoc(); 3044 3045 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3046 const MCExpr *Offset; 3047 if (getParser().ParseExpression(Offset)) 3048 return true; 3049 3050 // The expression has to be a constant. Memory references with relocations 3051 // don't come through here, as they use the <label> forms of the relevant 3052 // instructions. 3053 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3054 if (!CE) 3055 return Error (E, "constant expression expected"); 3056 3057 // If the constant was #-0, represent it as INT32_MIN. 
3058 int32_t Val = CE->getValue(); 3059 if (isNegative && Val == 0) 3060 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3061 3062 // Now we should have the closing ']' 3063 E = Parser.getTok().getLoc(); 3064 if (Parser.getTok().isNot(AsmToken::RBrac)) 3065 return Error(E, "']' expected"); 3066 Parser.Lex(); // Eat right bracket token. 3067 3068 // Don't worry about range checking the value here. That's handled by 3069 // the is*() predicates. 3070 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3071 ARM_AM::no_shift, 0, false, S,E)); 3072 3073 // If there's a pre-indexing writeback marker, '!', just add it as a token 3074 // operand. 3075 if (Parser.getTok().is(AsmToken::Exclaim)) { 3076 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3077 Parser.Lex(); // Eat the '!'. 3078 } 3079 3080 return false; 3081 } 3082 3083 // The register offset is optionally preceded by a '+' or '-' 3084 bool isNegative = false; 3085 if (Parser.getTok().is(AsmToken::Minus)) { 3086 isNegative = true; 3087 Parser.Lex(); // Eat the '-'. 3088 } else if (Parser.getTok().is(AsmToken::Plus)) { 3089 // Nothing to do. 3090 Parser.Lex(); // Eat the '+'. 3091 } 3092 3093 E = Parser.getTok().getLoc(); 3094 int OffsetRegNum = tryParseRegister(); 3095 if (OffsetRegNum == -1) 3096 return Error(E, "register expected"); 3097 3098 // If there's a shift operator, handle it. 3099 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3100 unsigned ShiftImm = 0; 3101 if (Parser.getTok().is(AsmToken::Comma)) { 3102 Parser.Lex(); // Eat the ','. 3103 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3104 return true; 3105 } 3106 3107 // Now we should have the closing ']' 3108 E = Parser.getTok().getLoc(); 3109 if (Parser.getTok().isNot(AsmToken::RBrac)) 3110 return Error(E, "']' expected"); 3111 Parser.Lex(); // Eat right bracket token. 
3112 3113 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3114 ShiftType, ShiftImm, isNegative, 3115 S, E)); 3116 3117 // If there's a pre-indexing writeback marker, '!', just add it as a token 3118 // operand. 3119 if (Parser.getTok().is(AsmToken::Exclaim)) { 3120 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3121 Parser.Lex(); // Eat the '!'. 3122 } 3123 3124 return false; 3125} 3126 3127/// parseMemRegOffsetShift - one of these two: 3128/// ( lsl | lsr | asr | ror ) , # shift_amount 3129/// rrx 3130/// return true if it parses a shift otherwise it returns false. 3131bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3132 unsigned &Amount) { 3133 SMLoc Loc = Parser.getTok().getLoc(); 3134 const AsmToken &Tok = Parser.getTok(); 3135 if (Tok.isNot(AsmToken::Identifier)) 3136 return true; 3137 StringRef ShiftName = Tok.getString(); 3138 if (ShiftName == "lsl" || ShiftName == "LSL") 3139 St = ARM_AM::lsl; 3140 else if (ShiftName == "lsr" || ShiftName == "LSR") 3141 St = ARM_AM::lsr; 3142 else if (ShiftName == "asr" || ShiftName == "ASR") 3143 St = ARM_AM::asr; 3144 else if (ShiftName == "ror" || ShiftName == "ROR") 3145 St = ARM_AM::ror; 3146 else if (ShiftName == "rrx" || ShiftName == "RRX") 3147 St = ARM_AM::rrx; 3148 else 3149 return Error(Loc, "illegal shift operator"); 3150 Parser.Lex(); // Eat shift type token. 3151 3152 // rrx stands alone. 3153 Amount = 0; 3154 if (St != ARM_AM::rrx) { 3155 Loc = Parser.getTok().getLoc(); 3156 // A '#' and a shift amount. 3157 const AsmToken &HashTok = Parser.getTok(); 3158 if (HashTok.isNot(AsmToken::Hash)) 3159 return Error(HashTok.getLoc(), "'#' expected"); 3160 Parser.Lex(); // Eat hash token. 3161 3162 const MCExpr *Expr; 3163 if (getParser().ParseExpression(Expr)) 3164 return true; 3165 // Range check the immediate. 
3166 // lsl, ror: 0 <= imm <= 31 3167 // lsr, asr: 0 <= imm <= 32 3168 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3169 if (!CE) 3170 return Error(Loc, "shift amount must be an immediate"); 3171 int64_t Imm = CE->getValue(); 3172 if (Imm < 0 || 3173 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3174 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3175 return Error(Loc, "immediate shift value out of range"); 3176 Amount = Imm; 3177 } 3178 3179 return false; 3180} 3181 3182/// parseFPImm - A floating point immediate expression operand. 3183ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3184parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3185 SMLoc S = Parser.getTok().getLoc(); 3186 3187 if (Parser.getTok().isNot(AsmToken::Hash)) 3188 return MatchOperand_NoMatch; 3189 Parser.Lex(); // Eat the '#'. 3190 3191 // Handle negation, as that still comes through as a separate token. 3192 bool isNegative = false; 3193 if (Parser.getTok().is(AsmToken::Minus)) { 3194 isNegative = true; 3195 Parser.Lex(); 3196 } 3197 const AsmToken &Tok = Parser.getTok(); 3198 if (Tok.is(AsmToken::Real)) { 3199 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3200 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3201 // If we had a '-' in front, toggle the sign bit. 3202 IntVal ^= (uint64_t)isNegative << 63; 3203 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3204 Parser.Lex(); // Eat the token. 3205 if (Val == -1) { 3206 TokError("floating point value out of range"); 3207 return MatchOperand_ParseFail; 3208 } 3209 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3210 return MatchOperand_Success; 3211 } 3212 if (Tok.is(AsmToken::Integer)) { 3213 int64_t Val = Tok.getIntVal(); 3214 Parser.Lex(); // Eat the token. 
3215 if (Val > 255 || Val < 0) { 3216 TokError("encoded floating point value out of range"); 3217 return MatchOperand_ParseFail; 3218 } 3219 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3220 return MatchOperand_Success; 3221 } 3222 3223 TokError("invalid floating point immediate"); 3224 return MatchOperand_ParseFail; 3225} 3226/// Parse a arm instruction operand. For now this parses the operand regardless 3227/// of the mnemonic. 3228bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3229 StringRef Mnemonic) { 3230 SMLoc S, E; 3231 3232 // Check if the current operand has a custom associated parser, if so, try to 3233 // custom parse the operand, or fallback to the general approach. 3234 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3235 if (ResTy == MatchOperand_Success) 3236 return false; 3237 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3238 // there was a match, but an error occurred, in which case, just return that 3239 // the operand parsing failed. 3240 if (ResTy == MatchOperand_ParseFail) 3241 return true; 3242 3243 switch (getLexer().getKind()) { 3244 default: 3245 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3246 return true; 3247 case AsmToken::Identifier: { 3248 // If this is VMRS, check for the apsr_nzcv operand. 3249 if (!tryParseRegisterWithWriteBack(Operands)) 3250 return false; 3251 int Res = tryParseShiftRegister(Operands); 3252 if (Res == 0) // success 3253 return false; 3254 else if (Res == -1) // irrecoverable error 3255 return true; 3256 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3257 S = Parser.getTok().getLoc(); 3258 Parser.Lex(); 3259 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3260 return false; 3261 } 3262 3263 // Fall though for the Identifier case that is not a register or a 3264 // special name. 
3265 } 3266 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3267 case AsmToken::Dot: { // . as a branch target 3268 // This was not a register so parse other operands that start with an 3269 // identifier (like labels) as expressions and create them as immediates. 3270 const MCExpr *IdVal; 3271 S = Parser.getTok().getLoc(); 3272 if (getParser().ParseExpression(IdVal)) 3273 return true; 3274 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3275 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3276 return false; 3277 } 3278 case AsmToken::LBrac: 3279 return parseMemory(Operands); 3280 case AsmToken::LCurly: 3281 return parseRegisterList(Operands); 3282 case AsmToken::Hash: { 3283 // #42 -> immediate. 3284 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3285 S = Parser.getTok().getLoc(); 3286 Parser.Lex(); 3287 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3288 const MCExpr *ImmVal; 3289 if (getParser().ParseExpression(ImmVal)) 3290 return true; 3291 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3292 if (!CE) { 3293 Error(S, "constant expression expected"); 3294 return MatchOperand_ParseFail; 3295 } 3296 int32_t Val = CE->getValue(); 3297 if (isNegative && Val == 0) 3298 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3299 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3300 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3301 return false; 3302 } 3303 case AsmToken::Colon: { 3304 // ":lower16:" and ":upper16:" expression prefixes 3305 // FIXME: Check it's an expression prefix, 3306 // e.g. (FOO - :lower16:BAR) isn't legal. 
3307 ARMMCExpr::VariantKind RefKind; 3308 if (parsePrefix(RefKind)) 3309 return true; 3310 3311 const MCExpr *SubExprVal; 3312 if (getParser().ParseExpression(SubExprVal)) 3313 return true; 3314 3315 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3316 getContext()); 3317 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3318 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3319 return false; 3320 } 3321 } 3322} 3323 3324// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3325// :lower16: and :upper16:. 3326bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 3327 RefKind = ARMMCExpr::VK_ARM_None; 3328 3329 // :lower16: and :upper16: modifiers 3330 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 3331 Parser.Lex(); // Eat ':' 3332 3333 if (getLexer().isNot(AsmToken::Identifier)) { 3334 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 3335 return true; 3336 } 3337 3338 StringRef IDVal = Parser.getTok().getIdentifier(); 3339 if (IDVal == "lower16") { 3340 RefKind = ARMMCExpr::VK_ARM_LO16; 3341 } else if (IDVal == "upper16") { 3342 RefKind = ARMMCExpr::VK_ARM_HI16; 3343 } else { 3344 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 3345 return true; 3346 } 3347 Parser.Lex(); 3348 3349 if (getLexer().isNot(AsmToken::Colon)) { 3350 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 3351 return true; 3352 } 3353 Parser.Lex(); // Eat the last ':' 3354 return false; 3355} 3356 3357/// \brief Given a mnemonic, split out possible predication code and carry 3358/// setting letters to form a canonical mnemonic and flags. 3359// 3360// FIXME: Would be nice to autogen this. 3361// FIXME: This is a bit of a maze of special cases. 
3362StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, 3363 unsigned &PredicationCode, 3364 bool &CarrySetting, 3365 unsigned &ProcessorIMod, 3366 StringRef &ITMask) { 3367 PredicationCode = ARMCC::AL; 3368 CarrySetting = false; 3369 ProcessorIMod = 0; 3370 3371 // Ignore some mnemonics we know aren't predicated forms. 3372 // 3373 // FIXME: Would be nice to autogen this. 3374 if ((Mnemonic == "movs" && isThumb()) || 3375 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || 3376 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || 3377 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || 3378 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || 3379 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || 3380 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || 3381 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal") 3382 return Mnemonic; 3383 3384 // First, split out any predication code. Ignore mnemonics we know aren't 3385 // predicated but do have a carry-set and so weren't caught above. 
3386 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && 3387 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && 3388 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && 3389 Mnemonic != "sbcs" && Mnemonic != "rscs") { 3390 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) 3391 .Case("eq", ARMCC::EQ) 3392 .Case("ne", ARMCC::NE) 3393 .Case("hs", ARMCC::HS) 3394 .Case("cs", ARMCC::HS) 3395 .Case("lo", ARMCC::LO) 3396 .Case("cc", ARMCC::LO) 3397 .Case("mi", ARMCC::MI) 3398 .Case("pl", ARMCC::PL) 3399 .Case("vs", ARMCC::VS) 3400 .Case("vc", ARMCC::VC) 3401 .Case("hi", ARMCC::HI) 3402 .Case("ls", ARMCC::LS) 3403 .Case("ge", ARMCC::GE) 3404 .Case("lt", ARMCC::LT) 3405 .Case("gt", ARMCC::GT) 3406 .Case("le", ARMCC::LE) 3407 .Case("al", ARMCC::AL) 3408 .Default(~0U); 3409 if (CC != ~0U) { 3410 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); 3411 PredicationCode = CC; 3412 } 3413 } 3414 3415 // Next, determine if we have a carry setting bit. We explicitly ignore all 3416 // the instructions we know end in 's'. 3417 if (Mnemonic.endswith("s") && 3418 !(Mnemonic == "cps" || Mnemonic == "mls" || 3419 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || 3420 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || 3421 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || 3422 Mnemonic == "vrsqrts" || Mnemonic == "srs" || 3423 (Mnemonic == "movs" && isThumb()))) { 3424 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); 3425 CarrySetting = true; 3426 } 3427 3428 // The "cps" instruction can have a interrupt mode operand which is glued into 3429 // the mnemonic. Check if this is the case, split it and parse the imod op 3430 if (Mnemonic.startswith("cps")) { 3431 // Split out any imod code. 
3432 unsigned IMod = 3433 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 3434 .Case("ie", ARM_PROC::IE) 3435 .Case("id", ARM_PROC::ID) 3436 .Default(~0U); 3437 if (IMod != ~0U) { 3438 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 3439 ProcessorIMod = IMod; 3440 } 3441 } 3442 3443 // The "it" instruction has the condition mask on the end of the mnemonic. 3444 if (Mnemonic.startswith("it")) { 3445 ITMask = Mnemonic.slice(2, Mnemonic.size()); 3446 Mnemonic = Mnemonic.slice(0, 2); 3447 } 3448 3449 return Mnemonic; 3450} 3451 3452/// \brief Given a canonical mnemonic, determine if the instruction ever allows 3453/// inclusion of carry set or predication code operands. 3454// 3455// FIXME: It would be nice to autogen this. 3456void ARMAsmParser:: 3457getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 3458 bool &CanAcceptPredicationCode) { 3459 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 3460 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 3461 Mnemonic == "add" || Mnemonic == "adc" || 3462 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 3463 Mnemonic == "orr" || Mnemonic == "mvn" || 3464 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 3465 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 3466 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 3467 Mnemonic == "mla" || Mnemonic == "smlal" || 3468 Mnemonic == "umlal" || Mnemonic == "umull"))) { 3469 CanAcceptCarrySet = true; 3470 } else 3471 CanAcceptCarrySet = false; 3472 3473 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 3474 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 3475 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 3476 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 3477 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 3478 (Mnemonic == "clrex" && !isThumb()) || 3479 (Mnemonic == "nop" && 
isThumbOne()) || 3480 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw") && 3481 !isThumb()) || 3482 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 3483 !isThumb()) || 3484 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 3485 CanAcceptPredicationCode = false; 3486 } else 3487 CanAcceptPredicationCode = true; 3488 3489 if (isThumb()) { 3490 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 3491 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 3492 CanAcceptPredicationCode = false; 3493 } 3494} 3495 3496bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 3497 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3498 // FIXME: This is all horribly hacky. We really need a better way to deal 3499 // with optional operands like this in the matcher table. 3500 3501 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 3502 // another does not. Specifically, the MOVW instruction does not. So we 3503 // special case it here and remove the defaulted (non-setting) cc_out 3504 // operand if that's the instruction we're trying to match. 3505 // 3506 // We do this as post-processing of the explicit operands rather than just 3507 // conditionally adding the cc_out in the first place because we need 3508 // to check the type of the parsed immediate operand. 3509 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 3510 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 3511 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 3512 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3513 return true; 3514 3515 // Register-register 'add' for thumb does not have a cc_out operand 3516 // when there are only two register operands. 
3517 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 3518 static_cast<ARMOperand*>(Operands[3])->isReg() && 3519 static_cast<ARMOperand*>(Operands[4])->isReg() && 3520 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3521 return true; 3522 // Register-register 'add' for thumb does not have a cc_out operand 3523 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 3524 // have to check the immediate range here since Thumb2 has a variant 3525 // that can handle a different range and has a cc_out operand. 3526 if (((isThumb() && Mnemonic == "add") || 3527 (isThumbTwo() && Mnemonic == "sub")) && 3528 Operands.size() == 6 && 3529 static_cast<ARMOperand*>(Operands[3])->isReg() && 3530 static_cast<ARMOperand*>(Operands[4])->isReg() && 3531 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 3532 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3533 (static_cast<ARMOperand*>(Operands[5])->isReg() || 3534 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 3535 return true; 3536 // For Thumb2, add/sub immediate does not have a cc_out operand for the 3537 // imm0_4095 variant. That's the least-preferred variant when 3538 // selecting via the generic "add" mnemonic, so to know that we 3539 // should remove the cc_out operand, we have to explicitly check that 3540 // it's not one of the other variants. Ugh. 3541 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 3542 Operands.size() == 6 && 3543 static_cast<ARMOperand*>(Operands[3])->isReg() && 3544 static_cast<ARMOperand*>(Operands[4])->isReg() && 3545 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3546 // Nest conditions rather than one big 'if' statement for readability. 3547 // 3548 // If either register is a high reg, it's either one of the SP 3549 // variants (handled above) or a 32-bit encoding, so we just 3550 // check against T3. 
  // NOTE(review): this chunk begins part-way through
  // ARMAsmParser::shouldOmitCCOutOperand(); the function's opening (and the
  // opening of the enclosing 'if') is above this view. The code below decides
  // whether the speculatively-added cc_out operand should be removed so the
  // matcher can select an encoding without one.
  //
  // If either register is a high reg, only the wide encoding applies; keep
  // the cc_out operand when the immediate fits the T2 modified-immediate form.
  if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
      static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
    return false;
  // If both registers are low, we're in an IT block, and the immediate is
  // in range, we should use encoding T1 instead, which has a cc_out.
  if (inITBlock() &&
      isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
      isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
      static_cast<ARMOperand*>(Operands[5])->isImm0_7())
    return false;

  // Otherwise, we use encoding T4, which does not have a cc_out
  // operand.
  return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

/// Parse an arm instruction mnemonic followed by its operands.
///
/// \param Name     the complete mnemonic token as written, possibly carrying
///                 '.'-separated suffix tokens (e.g. ".w", ".n").
/// \param NameLoc  source location of the mnemonic, used for diagnostics.
/// \param Operands filled with the parsed operands: mnemonic token first,
///                 then (when applicable) cc_out, condition code, and the
///                 explicit operands as written.
/// \returns true on error (after emitting a diagnostic), false on success.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // Loc points just past the two-character "it" mnemonic, at the mask.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Walk the mask string from its last character to its first. The seed
    // value 8 shifts down once per condition character, ending up as the
    // trailing '1' marker bit of the 4-bit IT mask.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The '+ CarrySetting' accounts for the 's' suffix character, when
    // present, so Loc points at the condition-code suffix itself.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
          NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic. Each ExtraToken keeps its
  // leading '.' since the slice starts at the '.' position.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // For now, we're only parsing Thumb1 (for the most part), so
    // just ignore ".n" qualifiers. We'll use them to restrict
    // matching when we do Thumb2.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is found in the register list, set
// 'containsReg' to true. HiReg, when non-zero, names one additional
// register (beyond r0-r7) that is permitted in the list (e.g. pc or lr).
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
// Declared here directly (rather than via MCInstrInfo) per the FIXME above:
// raw access to the tablegen'd instruction description table.
extern MCInstrDesc ARMInsts[];
}
/// Look up the MCInstrDesc for \p Opcode straight out of the ARMInsts table.
static MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// FIXME: We would really like to be able to tablegen'erate this.
/// Check context-sensitive operand constraints that the table-driven matcher
/// cannot express: IT-block predication rules and per-opcode register/
/// immediate restrictions.
/// \returns true after emitting a diagnostic if \p Inst is invalid in the
/// current context, false if it is acceptable.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    // 'bit' selects then ('t', 1) vs. else ('e', 0) for this slot of the IT
    // block. The first instruction always uses the IT condition directly.
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. (These forms carry the writeback register first,
    // so the register pair sits at operands 1 and 2.)
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    // Writeback form must not also name the base register in the list.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    // r0-r7 plus pc are legal in a Thumb1 POP list.
    if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    // r0-r7 plus lr are legal in a Thumb1 PUSH list.
    if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

/// Post-process a successfully matched instruction: tweak the selected
/// encoding (16-bit vs. 32-bit forms, pseudo expansions) and update the
/// IT-block tracking state for the t2IT instruction.
void ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    // NOTE(review): continuation of the ARM::STMDB_UPD case of
    // processInstruction() begun just above this chunk.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tADDi3);
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tSUBi3);
    break;
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::tBcc);
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock())
      Inst.setOpcode(ARM::t2B);
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL)
      Inst.setOpcode(ARM::tB);
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
    }
    break;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
         Inst.getOperand(4).getReg() == ARM::CPSR) ||
        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == ARMCC::AL &&
        Inst.getOperand(4).getReg() == ARM::CPSR &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't the same for tMOV[S]r... (no cc_out)
      MCInst TmpInst;
      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2SXTH:
  case ARM::t2SXTB:
  case ARM::t2UXTH:
  case ARM::t2UXTB: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == 0 &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("Illegal opcode!");
      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
      }
      // The operands aren't the same for thumb1 (no rotate operand).
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2IT: {
    // The mask bits for all but the first condition are represented as
    // the low bit of the condition code value implies 't'. We currently
    // always have 1 implies 't', so XOR toggle the bits if the low bit
    // of the condition code is zero. The encoding also expects the low
    // bit of the condition to be encoded as bit 4 of the mask operand,
    // so mask that in if needed
    MCOperand &MO = Inst.getOperand(1);
    unsigned Mask = MO.getImm();
    unsigned OrigMask = Mask;
    unsigned TZ = CountTrailingZeros_32(Mask);
    if ((Inst.getOperand(0).getImm() & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      // Flip every mask bit above the trailing '1' marker.
      for (unsigned i = 3; i != TZ; --i)
        Mask ^= 1 << i;
    } else
      Mask |= 0x10;
    MO.setImm(Mask);

    // Set up the IT block state according to the IT instruction we just
    // matched.
    assert(!inITBlock() && "nested IT blocks?!");
    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
    ITState.CurPosition = 0;
    ITState.FirstCond = true;
    break;
  }
  }
}

/// Target-specific match-quality check run after the table-driven matcher:
/// enforce IT-block / flag-setting constraints and a few per-opcode
/// subtarget requirements. Returns one of the Match_* codes.
unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
  unsigned Opc = Inst.getOpcode();
  MCInstrDesc &MCID = getInstDesc(Opc);
  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
    assert(MCID.hasOptionalDef() &&
           "optionally flag setting instruction missing optional def operand");
    assert(MCID.NumOperands == Inst.getNumOperands() &&
           "operand count mismatch!");
    // Find the optional-def operand (cc_out).
    // FIXME: the bounds check 'OpNo < MCID.NumOperands' should come before
    // the OpInfo[OpNo] access; as written the array is read one past the
    // end when no optional def is found. hasOptionalDef() (asserted above)
    // makes that unreachable in practice, but the condition order is fragile.
    unsigned OpNo;
    for (OpNo = 0;
         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
         ++OpNo)
      ;
    // If we're parsing Thumb1, reject it completely.
    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
      return Match_MnemonicFail;
    // If we're parsing Thumb2, which form is legal depends on whether we're
    // in an IT block.
    // NOTE(review): continuation of checkTargetMatchPredicate() begun just
    // above this chunk (OpNo indexes the cc_out optional-def operand).
    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
        !inITBlock())
      return Match_RequiresITBlock;
    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
        inITBlock())
      return Match_RequiresNotITBlock;
  }
  // Some high-register supporting Thumb1 encodings only allow both registers
  // to be from r0-r7 when in Thumb2.
  else if (Opc == ARM::tADDhirr && isThumbOne() &&
           isARMLowRegister(Inst.getOperand(1).getReg()) &&
           isARMLowRegister(Inst.getOperand(2).getReg()))
    return Match_RequiresThumb2;
  // Others only require ARMv6 or later.
  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
           isARMLowRegister(Inst.getOperand(0).getReg()) &&
           isARMLowRegister(Inst.getOperand(1).getReg()))
    return Match_RequiresV6;
  return Match_Success;
}

/// Match the parsed operand list against the instruction tables, run the
/// context-sensitive validation and post-processing, and emit the result
/// to \p Out on success.
/// \returns true on error (a diagnostic has been emitted), false on success.
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out) {
  MCInst Inst;
  unsigned ErrorInfo;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected.
    processInstruction(Inst, Operands);

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ErrorInfo is the index of the offending operand, or ~0U if unknown.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
/// Dispatches to the handler for each recognized directive; returns true
/// (unhandled) for anything else so generic directive parsing can run.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a value of \p Size bytes.
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // TODO: set thumb mode
  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumbfunc symbol_name
/// Marks the named symbol as a Thumb function. On Darwin (MachO) the symbol
/// name follows the directive; on ELF the next statement's first token is
/// taken as the name (see FIXME below).
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has function name after .thumb_func direction
  // ELF doesn't
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getString();
    Parser.Lex(); // Consume the identifier token.
  }

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // FIXME: assuming function name will be the line following .thumb_func
  if (!isMachO) {
    Name = Parser.getTok().getString();
  }

  // Mark symbol as a thumb symbol.
  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
  getParser().getStreamer().EmitThumbFunc(Func);
  return false;
}

/// parseDirectiveSyntax
///  ::= .syntax unified | divided
bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return Error(L, "unexpected token in .syntax directive");
  StringRef Mode = Tok.getString();
  if (Mode == "unified" || Mode == "UNIFIED")
    Parser.Lex();
  else if (Mode == "divided" || Mode == "DIVIDED")
    // FIXME: typo "asssembly" in this user-visible diagnostic.
    return Error(L, "'.syntax divided' arm asssembly not supported");
  else
    return Error(L, "unrecognized syntax mode in .syntax directive");

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
  Parser.Lex();

  // TODO tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveCode
///  ::= .code 16 | 32
/// Switches between Thumb (16) and ARM (32) mode and notifies the streamer.
bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Integer))
    return Error(L, "unexpected token in .code directive");
  int64_t Val = Parser.getTok().getIntVal();
  if (Val == 16)
    Parser.Lex();
  else if (Val == 32)
    Parser.Lex();
  else
    return Error(L, "invalid operand to .code directive");

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
  Parser.Lex();

  if (Val == 16) {
    if (!isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  } else {
    if (isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  }

  return false;
}

extern "C" void LLVMInitializeARMAsmLexer();

/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"