// ARMAsmParser.cpp, revision a39cda7aff2d379ad9c15500319ab037baa48747
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringSwitch.h" 34#include "llvm/ADT/Twine.h" 35 36using namespace llvm; 37 38namespace { 39 40class ARMOperand; 41 42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane }; 43 44class ARMAsmParser : public MCTargetAsmParser { 45 MCSubtargetInfo &STI; 46 MCAsmParser &Parser; 47 48 // Map of register aliases registers via the .req directive. 49 StringMap<unsigned> RegisterReqs; 50 51 struct { 52 ARMCC::CondCodes Cond; // Condition for IT block. 53 unsigned Mask:4; // Condition mask for instructions. 54 // Starting at first 1 (from lsb). 55 // '1' condition as indicated in IT. 56 // '0' inverse of condition (else). 
57 // Count of instructions in IT block is 58 // 4 - trailingzeroes(mask) 59 60 bool FirstCond; // Explicit flag for when we're parsing the 61 // First instruction in the IT block. It's 62 // implied in the mask, so needs special 63 // handling. 64 65 unsigned CurPosition; // Current position in parsing of IT 66 // block. In range [0,3]. Initialized 67 // according to count of instructions in block. 68 // ~0U if no active IT block. 69 } ITState; 70 bool inITBlock() { return ITState.CurPosition != ~0U;} 71 void forwardITPosition() { 72 if (!inITBlock()) return; 73 // Move to the next instruction in the IT block, if there is one. If not, 74 // mark the block as done. 75 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 76 if (++ITState.CurPosition == 5 - TZ) 77 ITState.CurPosition = ~0U; // Done with the IT block after this. 78 } 79 80 81 MCAsmParser &getParser() const { return Parser; } 82 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 83 84 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 85 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 86 87 int tryParseRegister(); 88 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 89 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 90 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 91 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 92 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 93 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 94 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 95 unsigned &ShiftAmount); 96 bool parseDirectiveWord(unsigned Size, SMLoc L); 97 bool parseDirectiveThumb(SMLoc L); 98 bool parseDirectiveARM(SMLoc L); 99 bool parseDirectiveThumbFunc(SMLoc L); 100 bool parseDirectiveCode(SMLoc L); 101 bool parseDirectiveSyntax(SMLoc L); 102 bool parseDirectiveReq(StringRef Name, SMLoc L); 103 bool parseDirectiveUnreq(SMLoc L); 104 105 StringRef 
splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 106 bool &CarrySetting, unsigned &ProcessorIMod, 107 StringRef &ITMask); 108 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 109 bool &CanAcceptPredicationCode); 110 111 bool isThumb() const { 112 // FIXME: Can tablegen auto-generate this? 113 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 114 } 115 bool isThumbOne() const { 116 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 117 } 118 bool isThumbTwo() const { 119 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 120 } 121 bool hasV6Ops() const { 122 return STI.getFeatureBits() & ARM::HasV6Ops; 123 } 124 bool hasV7Ops() const { 125 return STI.getFeatureBits() & ARM::HasV7Ops; 126 } 127 void SwitchMode() { 128 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 129 setAvailableFeatures(FB); 130 } 131 bool isMClass() const { 132 return STI.getFeatureBits() & ARM::FeatureMClass; 133 } 134 135 /// @name Auto-generated Match Functions 136 /// { 137 138#define GET_ASSEMBLER_HEADER 139#include "ARMGenAsmMatcher.inc" 140 141 /// } 142 143 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 144 OperandMatchResultTy parseCoprocNumOperand( 145 SmallVectorImpl<MCParsedAsmOperand*>&); 146 OperandMatchResultTy parseCoprocRegOperand( 147 SmallVectorImpl<MCParsedAsmOperand*>&); 148 OperandMatchResultTy parseCoprocOptionOperand( 149 SmallVectorImpl<MCParsedAsmOperand*>&); 150 OperandMatchResultTy parseMemBarrierOptOperand( 151 SmallVectorImpl<MCParsedAsmOperand*>&); 152 OperandMatchResultTy parseProcIFlagsOperand( 153 SmallVectorImpl<MCParsedAsmOperand*>&); 154 OperandMatchResultTy parseMSRMaskOperand( 155 SmallVectorImpl<MCParsedAsmOperand*>&); 156 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 157 StringRef Op, int Low, int High); 158 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 159 return 
parsePKHImm(O, "lsl", 0, 31); 160 } 161 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 162 return parsePKHImm(O, "asr", 1, 32); 163 } 164 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 165 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 166 OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 167 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 168 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 169 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 170 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 171 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 172 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index); 173 174 // Asm Match Converter Methods 175 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 176 const SmallVectorImpl<MCParsedAsmOperand*> &); 177 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 178 const SmallVectorImpl<MCParsedAsmOperand*> &); 179 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 180 const SmallVectorImpl<MCParsedAsmOperand*> &); 181 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 182 const SmallVectorImpl<MCParsedAsmOperand*> &); 183 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 184 const SmallVectorImpl<MCParsedAsmOperand*> &); 185 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 186 const SmallVectorImpl<MCParsedAsmOperand*> &); 187 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 188 const SmallVectorImpl<MCParsedAsmOperand*> &); 189 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 190 const SmallVectorImpl<MCParsedAsmOperand*> &); 191 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 192 const SmallVectorImpl<MCParsedAsmOperand*> &); 193 bool 
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 194 const SmallVectorImpl<MCParsedAsmOperand*> &); 195 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 196 const SmallVectorImpl<MCParsedAsmOperand*> &); 197 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 198 const SmallVectorImpl<MCParsedAsmOperand*> &); 199 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 200 const SmallVectorImpl<MCParsedAsmOperand*> &); 201 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 202 const SmallVectorImpl<MCParsedAsmOperand*> &); 203 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 204 const SmallVectorImpl<MCParsedAsmOperand*> &); 205 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 206 const SmallVectorImpl<MCParsedAsmOperand*> &); 207 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 208 const SmallVectorImpl<MCParsedAsmOperand*> &); 209 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 210 const SmallVectorImpl<MCParsedAsmOperand*> &); 211 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 212 const SmallVectorImpl<MCParsedAsmOperand*> &); 213 bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 214 const SmallVectorImpl<MCParsedAsmOperand*> &); 215 bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 216 const SmallVectorImpl<MCParsedAsmOperand*> &); 217 218 bool validateInstruction(MCInst &Inst, 219 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 220 bool processInstruction(MCInst &Inst, 221 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 222 bool shouldOmitCCOutOperand(StringRef Mnemonic, 223 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 224 225public: 226 enum ARMMatchResultTy { 227 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 228 Match_RequiresNotITBlock, 229 Match_RequiresV6, 230 Match_RequiresThumb2 231 }; 232 233 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 234 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 235 MCAsmParserExtension::Initialize(_Parser); 236 237 // Initialize the set of available 
features. 238 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 239 240 // Not in an ITBlock to start with. 241 ITState.CurPosition = ~0U; 242 } 243 244 // Implementation of the MCTargetAsmParser interface: 245 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 246 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 247 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 248 bool ParseDirective(AsmToken DirectiveID); 249 250 unsigned checkTargetMatchPredicate(MCInst &Inst); 251 252 bool MatchAndEmitInstruction(SMLoc IDLoc, 253 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 254 MCStreamer &Out); 255}; 256} // end anonymous namespace 257 258namespace { 259 260/// ARMOperand - Instances of this class represent a parsed ARM machine 261/// instruction. 262class ARMOperand : public MCParsedAsmOperand { 263 enum KindTy { 264 k_CondCode, 265 k_CCOut, 266 k_ITCondMask, 267 k_CoprocNum, 268 k_CoprocReg, 269 k_CoprocOption, 270 k_Immediate, 271 k_FPImmediate, 272 k_MemBarrierOpt, 273 k_Memory, 274 k_PostIndexRegister, 275 k_MSRMask, 276 k_ProcIFlags, 277 k_VectorIndex, 278 k_Register, 279 k_RegisterList, 280 k_DPRRegisterList, 281 k_SPRRegisterList, 282 k_VectorList, 283 k_VectorListAllLanes, 284 k_VectorListIndexed, 285 k_ShiftedRegister, 286 k_ShiftedImmediate, 287 k_ShifterImmediate, 288 k_RotateImmediate, 289 k_BitfieldDescriptor, 290 k_Token 291 } Kind; 292 293 SMLoc StartLoc, EndLoc; 294 SmallVector<unsigned, 8> Registers; 295 296 union { 297 struct { 298 ARMCC::CondCodes Val; 299 } CC; 300 301 struct { 302 unsigned Val; 303 } Cop; 304 305 struct { 306 unsigned Val; 307 } CoprocOption; 308 309 struct { 310 unsigned Mask:4; 311 } ITMask; 312 313 struct { 314 ARM_MB::MemBOpt Val; 315 } MBOpt; 316 317 struct { 318 ARM_PROC::IFlags Val; 319 } IFlags; 320 321 struct { 322 unsigned Val; 323 } MMask; 324 325 struct { 326 const char *Data; 327 unsigned Length; 328 } Tok; 329 330 struct { 331 unsigned RegNum; 332 } Reg; 333 334 // A vector 
register list is a sequential list of 1 to 4 registers. 335 struct { 336 unsigned RegNum; 337 unsigned Count; 338 unsigned LaneIndex; 339 } VectorList; 340 341 struct { 342 unsigned Val; 343 } VectorIndex; 344 345 struct { 346 const MCExpr *Val; 347 } Imm; 348 349 struct { 350 unsigned Val; // encoded 8-bit representation 351 } FPImm; 352 353 /// Combined record for all forms of ARM address expressions. 354 struct { 355 unsigned BaseRegNum; 356 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 357 // was specified. 358 const MCConstantExpr *OffsetImm; // Offset immediate value 359 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 360 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 361 unsigned ShiftImm; // shift for OffsetReg. 362 unsigned Alignment; // 0 = no alignment specified 363 // n = alignment in bytes (8, 16, or 32) 364 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 365 } Memory; 366 367 struct { 368 unsigned RegNum; 369 bool isAdd; 370 ARM_AM::ShiftOpc ShiftTy; 371 unsigned ShiftImm; 372 } PostIdxReg; 373 374 struct { 375 bool isASR; 376 unsigned Imm; 377 } ShifterImm; 378 struct { 379 ARM_AM::ShiftOpc ShiftTy; 380 unsigned SrcReg; 381 unsigned ShiftReg; 382 unsigned ShiftImm; 383 } RegShiftedReg; 384 struct { 385 ARM_AM::ShiftOpc ShiftTy; 386 unsigned SrcReg; 387 unsigned ShiftImm; 388 } RegShiftedImm; 389 struct { 390 unsigned Imm; 391 } RotImm; 392 struct { 393 unsigned LSB; 394 unsigned Width; 395 } Bitfield; 396 }; 397 398 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 399public: 400 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 401 Kind = o.Kind; 402 StartLoc = o.StartLoc; 403 EndLoc = o.EndLoc; 404 switch (Kind) { 405 case k_CondCode: 406 CC = o.CC; 407 break; 408 case k_ITCondMask: 409 ITMask = o.ITMask; 410 break; 411 case k_Token: 412 Tok = o.Tok; 413 break; 414 case k_CCOut: 415 case k_Register: 416 Reg = o.Reg; 417 break; 418 case k_RegisterList: 419 case 
k_DPRRegisterList: 420 case k_SPRRegisterList: 421 Registers = o.Registers; 422 break; 423 case k_VectorList: 424 case k_VectorListAllLanes: 425 case k_VectorListIndexed: 426 VectorList = o.VectorList; 427 break; 428 case k_CoprocNum: 429 case k_CoprocReg: 430 Cop = o.Cop; 431 break; 432 case k_CoprocOption: 433 CoprocOption = o.CoprocOption; 434 break; 435 case k_Immediate: 436 Imm = o.Imm; 437 break; 438 case k_FPImmediate: 439 FPImm = o.FPImm; 440 break; 441 case k_MemBarrierOpt: 442 MBOpt = o.MBOpt; 443 break; 444 case k_Memory: 445 Memory = o.Memory; 446 break; 447 case k_PostIndexRegister: 448 PostIdxReg = o.PostIdxReg; 449 break; 450 case k_MSRMask: 451 MMask = o.MMask; 452 break; 453 case k_ProcIFlags: 454 IFlags = o.IFlags; 455 break; 456 case k_ShifterImmediate: 457 ShifterImm = o.ShifterImm; 458 break; 459 case k_ShiftedRegister: 460 RegShiftedReg = o.RegShiftedReg; 461 break; 462 case k_ShiftedImmediate: 463 RegShiftedImm = o.RegShiftedImm; 464 break; 465 case k_RotateImmediate: 466 RotImm = o.RotImm; 467 break; 468 case k_BitfieldDescriptor: 469 Bitfield = o.Bitfield; 470 break; 471 case k_VectorIndex: 472 VectorIndex = o.VectorIndex; 473 break; 474 } 475 } 476 477 /// getStartLoc - Get the location of the first token of this operand. 478 SMLoc getStartLoc() const { return StartLoc; } 479 /// getEndLoc - Get the location of the last token of this operand. 
480 SMLoc getEndLoc() const { return EndLoc; } 481 482 ARMCC::CondCodes getCondCode() const { 483 assert(Kind == k_CondCode && "Invalid access!"); 484 return CC.Val; 485 } 486 487 unsigned getCoproc() const { 488 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 489 return Cop.Val; 490 } 491 492 StringRef getToken() const { 493 assert(Kind == k_Token && "Invalid access!"); 494 return StringRef(Tok.Data, Tok.Length); 495 } 496 497 unsigned getReg() const { 498 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 499 return Reg.RegNum; 500 } 501 502 const SmallVectorImpl<unsigned> &getRegList() const { 503 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 504 Kind == k_SPRRegisterList) && "Invalid access!"); 505 return Registers; 506 } 507 508 const MCExpr *getImm() const { 509 assert(Kind == k_Immediate && "Invalid access!"); 510 return Imm.Val; 511 } 512 513 unsigned getFPImm() const { 514 assert(Kind == k_FPImmediate && "Invalid access!"); 515 return FPImm.Val; 516 } 517 518 unsigned getVectorIndex() const { 519 assert(Kind == k_VectorIndex && "Invalid access!"); 520 return VectorIndex.Val; 521 } 522 523 ARM_MB::MemBOpt getMemBarrierOpt() const { 524 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 525 return MBOpt.Val; 526 } 527 528 ARM_PROC::IFlags getProcIFlags() const { 529 assert(Kind == k_ProcIFlags && "Invalid access!"); 530 return IFlags.Val; 531 } 532 533 unsigned getMSRMask() const { 534 assert(Kind == k_MSRMask && "Invalid access!"); 535 return MMask.Val; 536 } 537 538 bool isCoprocNum() const { return Kind == k_CoprocNum; } 539 bool isCoprocReg() const { return Kind == k_CoprocReg; } 540 bool isCoprocOption() const { return Kind == k_CoprocOption; } 541 bool isCondCode() const { return Kind == k_CondCode; } 542 bool isCCOut() const { return Kind == k_CCOut; } 543 bool isITMask() const { return Kind == k_ITCondMask; } 544 bool isITCondCode() const { return Kind == k_CondCode; } 545 bool isImm() 
const { return Kind == k_Immediate; } 546 bool isFPImm() const { return Kind == k_FPImmediate; } 547 bool isImm8s4() const { 548 if (Kind != k_Immediate) 549 return false; 550 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 551 if (!CE) return false; 552 int64_t Value = CE->getValue(); 553 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 554 } 555 bool isImm0_1020s4() const { 556 if (Kind != k_Immediate) 557 return false; 558 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 559 if (!CE) return false; 560 int64_t Value = CE->getValue(); 561 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 562 } 563 bool isImm0_508s4() const { 564 if (Kind != k_Immediate) 565 return false; 566 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 567 if (!CE) return false; 568 int64_t Value = CE->getValue(); 569 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 570 } 571 bool isImm0_255() const { 572 if (Kind != k_Immediate) 573 return false; 574 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 575 if (!CE) return false; 576 int64_t Value = CE->getValue(); 577 return Value >= 0 && Value < 256; 578 } 579 bool isImm0_1() const { 580 if (Kind != k_Immediate) 581 return false; 582 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 583 if (!CE) return false; 584 int64_t Value = CE->getValue(); 585 return Value >= 0 && Value < 2; 586 } 587 bool isImm0_3() const { 588 if (Kind != k_Immediate) 589 return false; 590 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 591 if (!CE) return false; 592 int64_t Value = CE->getValue(); 593 return Value >= 0 && Value < 4; 594 } 595 bool isImm0_7() const { 596 if (Kind != k_Immediate) 597 return false; 598 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 599 if (!CE) return false; 600 int64_t Value = CE->getValue(); 601 return Value >= 0 && Value < 8; 602 } 603 bool isImm0_15() const { 604 if (Kind != k_Immediate) 605 return false; 606 
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 607 if (!CE) return false; 608 int64_t Value = CE->getValue(); 609 return Value >= 0 && Value < 16; 610 } 611 bool isImm0_31() const { 612 if (Kind != k_Immediate) 613 return false; 614 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 615 if (!CE) return false; 616 int64_t Value = CE->getValue(); 617 return Value >= 0 && Value < 32; 618 } 619 bool isImm0_63() const { 620 if (Kind != k_Immediate) 621 return false; 622 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 623 if (!CE) return false; 624 int64_t Value = CE->getValue(); 625 return Value >= 0 && Value < 64; 626 } 627 bool isImm8() const { 628 if (Kind != k_Immediate) 629 return false; 630 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 631 if (!CE) return false; 632 int64_t Value = CE->getValue(); 633 return Value == 8; 634 } 635 bool isImm16() const { 636 if (Kind != k_Immediate) 637 return false; 638 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 639 if (!CE) return false; 640 int64_t Value = CE->getValue(); 641 return Value == 16; 642 } 643 bool isImm32() const { 644 if (Kind != k_Immediate) 645 return false; 646 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 647 if (!CE) return false; 648 int64_t Value = CE->getValue(); 649 return Value == 32; 650 } 651 bool isShrImm8() const { 652 if (Kind != k_Immediate) 653 return false; 654 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 655 if (!CE) return false; 656 int64_t Value = CE->getValue(); 657 return Value > 0 && Value <= 8; 658 } 659 bool isShrImm16() const { 660 if (Kind != k_Immediate) 661 return false; 662 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 663 if (!CE) return false; 664 int64_t Value = CE->getValue(); 665 return Value > 0 && Value <= 16; 666 } 667 bool isShrImm32() const { 668 if (Kind != k_Immediate) 669 return false; 670 const MCConstantExpr *CE = 
dyn_cast<MCConstantExpr>(getImm()); 671 if (!CE) return false; 672 int64_t Value = CE->getValue(); 673 return Value > 0 && Value <= 32; 674 } 675 bool isShrImm64() const { 676 if (Kind != k_Immediate) 677 return false; 678 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 679 if (!CE) return false; 680 int64_t Value = CE->getValue(); 681 return Value > 0 && Value <= 64; 682 } 683 bool isImm1_7() const { 684 if (Kind != k_Immediate) 685 return false; 686 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 687 if (!CE) return false; 688 int64_t Value = CE->getValue(); 689 return Value > 0 && Value < 8; 690 } 691 bool isImm1_15() const { 692 if (Kind != k_Immediate) 693 return false; 694 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 695 if (!CE) return false; 696 int64_t Value = CE->getValue(); 697 return Value > 0 && Value < 16; 698 } 699 bool isImm1_31() const { 700 if (Kind != k_Immediate) 701 return false; 702 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 703 if (!CE) return false; 704 int64_t Value = CE->getValue(); 705 return Value > 0 && Value < 32; 706 } 707 bool isImm1_16() const { 708 if (Kind != k_Immediate) 709 return false; 710 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 711 if (!CE) return false; 712 int64_t Value = CE->getValue(); 713 return Value > 0 && Value < 17; 714 } 715 bool isImm1_32() const { 716 if (Kind != k_Immediate) 717 return false; 718 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 719 if (!CE) return false; 720 int64_t Value = CE->getValue(); 721 return Value > 0 && Value < 33; 722 } 723 bool isImm0_32() const { 724 if (Kind != k_Immediate) 725 return false; 726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 727 if (!CE) return false; 728 int64_t Value = CE->getValue(); 729 return Value >= 0 && Value < 33; 730 } 731 bool isImm0_65535() const { 732 if (Kind != k_Immediate) 733 return false; 734 const MCConstantExpr *CE = 
dyn_cast<MCConstantExpr>(getImm()); 735 if (!CE) return false; 736 int64_t Value = CE->getValue(); 737 return Value >= 0 && Value < 65536; 738 } 739 bool isImm0_65535Expr() const { 740 if (Kind != k_Immediate) 741 return false; 742 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 743 // If it's not a constant expression, it'll generate a fixup and be 744 // handled later. 745 if (!CE) return true; 746 int64_t Value = CE->getValue(); 747 return Value >= 0 && Value < 65536; 748 } 749 bool isImm24bit() const { 750 if (Kind != k_Immediate) 751 return false; 752 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 753 if (!CE) return false; 754 int64_t Value = CE->getValue(); 755 return Value >= 0 && Value <= 0xffffff; 756 } 757 bool isImmThumbSR() const { 758 if (Kind != k_Immediate) 759 return false; 760 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 761 if (!CE) return false; 762 int64_t Value = CE->getValue(); 763 return Value > 0 && Value < 33; 764 } 765 bool isPKHLSLImm() const { 766 if (Kind != k_Immediate) 767 return false; 768 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 769 if (!CE) return false; 770 int64_t Value = CE->getValue(); 771 return Value >= 0 && Value < 32; 772 } 773 bool isPKHASRImm() const { 774 if (Kind != k_Immediate) 775 return false; 776 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 777 if (!CE) return false; 778 int64_t Value = CE->getValue(); 779 return Value > 0 && Value <= 32; 780 } 781 bool isARMSOImm() const { 782 if (Kind != k_Immediate) 783 return false; 784 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 785 if (!CE) return false; 786 int64_t Value = CE->getValue(); 787 return ARM_AM::getSOImmVal(Value) != -1; 788 } 789 bool isARMSOImmNot() const { 790 if (Kind != k_Immediate) 791 return false; 792 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 793 if (!CE) return false; 794 int64_t Value = CE->getValue(); 795 return 
ARM_AM::getSOImmVal(~Value) != -1; 796 } 797 bool isARMSOImmNeg() const { 798 if (Kind != k_Immediate) 799 return false; 800 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 801 if (!CE) return false; 802 int64_t Value = CE->getValue(); 803 return ARM_AM::getSOImmVal(-Value) != -1; 804 } 805 bool isT2SOImm() const { 806 if (Kind != k_Immediate) 807 return false; 808 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 809 if (!CE) return false; 810 int64_t Value = CE->getValue(); 811 return ARM_AM::getT2SOImmVal(Value) != -1; 812 } 813 bool isT2SOImmNot() const { 814 if (Kind != k_Immediate) 815 return false; 816 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 817 if (!CE) return false; 818 int64_t Value = CE->getValue(); 819 return ARM_AM::getT2SOImmVal(~Value) != -1; 820 } 821 bool isT2SOImmNeg() const { 822 if (Kind != k_Immediate) 823 return false; 824 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 825 if (!CE) return false; 826 int64_t Value = CE->getValue(); 827 return ARM_AM::getT2SOImmVal(-Value) != -1; 828 } 829 bool isSetEndImm() const { 830 if (Kind != k_Immediate) 831 return false; 832 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 833 if (!CE) return false; 834 int64_t Value = CE->getValue(); 835 return Value == 1 || Value == 0; 836 } 837 bool isReg() const { return Kind == k_Register; } 838 bool isRegList() const { return Kind == k_RegisterList; } 839 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 840 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 841 bool isToken() const { return Kind == k_Token; } 842 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 843 bool isMemory() const { return Kind == k_Memory; } 844 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 845 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 846 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 847 bool isRotImm() 
const { return Kind == k_RotateImmediate; } 848 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 849 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 850 bool isPostIdxReg() const { 851 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 852 } 853 bool isMemNoOffset(bool alignOK = false) const { 854 if (!isMemory()) 855 return false; 856 // No offset of any kind. 857 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 858 (alignOK || Memory.Alignment == 0); 859 } 860 bool isAlignedMemory() const { 861 return isMemNoOffset(true); 862 } 863 bool isAddrMode2() const { 864 if (!isMemory() || Memory.Alignment != 0) return false; 865 // Check for register offset. 866 if (Memory.OffsetRegNum) return true; 867 // Immediate offset in range [-4095, 4095]. 868 if (!Memory.OffsetImm) return true; 869 int64_t Val = Memory.OffsetImm->getValue(); 870 return Val > -4096 && Val < 4096; 871 } 872 bool isAM2OffsetImm() const { 873 if (Kind != k_Immediate) 874 return false; 875 // Immediate offset in range [-4095, 4095]. 876 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 877 if (!CE) return false; 878 int64_t Val = CE->getValue(); 879 return Val > -4096 && Val < 4096; 880 } 881 bool isAddrMode3() const { 882 if (!isMemory() || Memory.Alignment != 0) return false; 883 // No shifts are legal for AM3. 884 if (Memory.ShiftType != ARM_AM::no_shift) return false; 885 // Check for register offset. 886 if (Memory.OffsetRegNum) return true; 887 // Immediate offset in range [-255, 255]. 888 if (!Memory.OffsetImm) return true; 889 int64_t Val = Memory.OffsetImm->getValue(); 890 return Val > -256 && Val < 256; 891 } 892 bool isAM3Offset() const { 893 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 894 return false; 895 if (Kind == k_PostIndexRegister) 896 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 897 // Immediate offset in range [-255, 255]. 
898 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 899 if (!CE) return false; 900 int64_t Val = CE->getValue(); 901 // Special case, #-0 is INT32_MIN. 902 return (Val > -256 && Val < 256) || Val == INT32_MIN; 903 } 904 bool isAddrMode5() const { 905 // If we have an immediate that's not a constant, treat it as a label 906 // reference needing a fixup. If it is a constant, it's something else 907 // and we reject it. 908 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 909 return true; 910 if (!isMemory() || Memory.Alignment != 0) return false; 911 // Check for register offset. 912 if (Memory.OffsetRegNum) return false; 913 // Immediate offset in range [-1020, 1020] and a multiple of 4. 914 if (!Memory.OffsetImm) return true; 915 int64_t Val = Memory.OffsetImm->getValue(); 916 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 917 Val == INT32_MIN; 918 } 919 bool isMemTBB() const { 920 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 921 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 922 return false; 923 return true; 924 } 925 bool isMemTBH() const { 926 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 927 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 928 Memory.Alignment != 0 ) 929 return false; 930 return true; 931 } 932 bool isMemRegOffset() const { 933 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 934 return false; 935 return true; 936 } 937 bool isT2MemRegOffset() const { 938 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 939 Memory.Alignment != 0) 940 return false; 941 // Only lsl #{0, 1, 2, 3} allowed. 942 if (Memory.ShiftType == ARM_AM::no_shift) 943 return true; 944 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 945 return false; 946 return true; 947 } 948 bool isMemThumbRR() const { 949 // Thumb reg+reg addressing is simple. Just two registers, a base and 950 // an offset. 
No shifts, negations or any other complicating factors. 951 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 952 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 953 return false; 954 return isARMLowRegister(Memory.BaseRegNum) && 955 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 956 } 957 bool isMemThumbRIs4() const { 958 if (!isMemory() || Memory.OffsetRegNum != 0 || 959 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 960 return false; 961 // Immediate offset, multiple of 4 in range [0, 124]. 962 if (!Memory.OffsetImm) return true; 963 int64_t Val = Memory.OffsetImm->getValue(); 964 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 965 } 966 bool isMemThumbRIs2() const { 967 if (!isMemory() || Memory.OffsetRegNum != 0 || 968 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 969 return false; 970 // Immediate offset, multiple of 4 in range [0, 62]. 971 if (!Memory.OffsetImm) return true; 972 int64_t Val = Memory.OffsetImm->getValue(); 973 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 974 } 975 bool isMemThumbRIs1() const { 976 if (!isMemory() || Memory.OffsetRegNum != 0 || 977 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 978 return false; 979 // Immediate offset in range [0, 31]. 980 if (!Memory.OffsetImm) return true; 981 int64_t Val = Memory.OffsetImm->getValue(); 982 return Val >= 0 && Val <= 31; 983 } 984 bool isMemThumbSPI() const { 985 if (!isMemory() || Memory.OffsetRegNum != 0 || 986 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 987 return false; 988 // Immediate offset, multiple of 4 in range [0, 1020]. 989 if (!Memory.OffsetImm) return true; 990 int64_t Val = Memory.OffsetImm->getValue(); 991 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 992 } 993 bool isMemImm8s4Offset() const { 994 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 995 return false; 996 // Immediate offset a multiple of 4 in range [-1020, 1020]. 
997 if (!Memory.OffsetImm) return true; 998 int64_t Val = Memory.OffsetImm->getValue(); 999 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 1000 } 1001 bool isMemImm0_1020s4Offset() const { 1002 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1003 return false; 1004 // Immediate offset a multiple of 4 in range [0, 1020]. 1005 if (!Memory.OffsetImm) return true; 1006 int64_t Val = Memory.OffsetImm->getValue(); 1007 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 1008 } 1009 bool isMemImm8Offset() const { 1010 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1011 return false; 1012 // Immediate offset in range [-255, 255]. 1013 if (!Memory.OffsetImm) return true; 1014 int64_t Val = Memory.OffsetImm->getValue(); 1015 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 1016 } 1017 bool isMemPosImm8Offset() const { 1018 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1019 return false; 1020 // Immediate offset in range [0, 255]. 1021 if (!Memory.OffsetImm) return true; 1022 int64_t Val = Memory.OffsetImm->getValue(); 1023 return Val >= 0 && Val < 256; 1024 } 1025 bool isMemNegImm8Offset() const { 1026 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1027 return false; 1028 // Immediate offset in range [-255, -1]. 1029 if (!Memory.OffsetImm) return false; 1030 int64_t Val = Memory.OffsetImm->getValue(); 1031 return (Val == INT32_MIN) || (Val > -256 && Val < 0); 1032 } 1033 bool isMemUImm12Offset() const { 1034 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1035 return false; 1036 // Immediate offset in range [0, 4095]. 1037 if (!Memory.OffsetImm) return true; 1038 int64_t Val = Memory.OffsetImm->getValue(); 1039 return (Val >= 0 && Val < 4096); 1040 } 1041 bool isMemImm12Offset() const { 1042 // If we have an immediate that's not a constant, treat it as a label 1043 // reference needing a fixup. 
If it is a constant, it's something else 1044 // and we reject it. 1045 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 1046 return true; 1047 1048 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1049 return false; 1050 // Immediate offset in range [-4095, 4095]. 1051 if (!Memory.OffsetImm) return true; 1052 int64_t Val = Memory.OffsetImm->getValue(); 1053 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 1054 } 1055 bool isPostIdxImm8() const { 1056 if (Kind != k_Immediate) 1057 return false; 1058 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1059 if (!CE) return false; 1060 int64_t Val = CE->getValue(); 1061 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 1062 } 1063 bool isPostIdxImm8s4() const { 1064 if (Kind != k_Immediate) 1065 return false; 1066 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1067 if (!CE) return false; 1068 int64_t Val = CE->getValue(); 1069 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 1070 (Val == INT32_MIN); 1071 } 1072 1073 bool isMSRMask() const { return Kind == k_MSRMask; } 1074 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 1075 1076 // NEON operands. 1077 bool isVecListOneD() const { 1078 if (Kind != k_VectorList) return false; 1079 return VectorList.Count == 1; 1080 } 1081 1082 bool isVecListTwoD() const { 1083 if (Kind != k_VectorList) return false; 1084 return VectorList.Count == 2; 1085 } 1086 1087 bool isVecListThreeD() const { 1088 if (Kind != k_VectorList) return false; 1089 return VectorList.Count == 3; 1090 } 1091 1092 bool isVecListFourD() const { 1093 if (Kind != k_VectorList) return false; 1094 return VectorList.Count == 4; 1095 } 1096 1097 bool isVecListTwoQ() const { 1098 if (Kind != k_VectorList) return false; 1099 //FIXME: We haven't taught the parser to handle by-two register lists 1100 // yet, so don't pretend to know one. 
1101 return VectorList.Count == 2 && false; 1102 } 1103 1104 bool isVecListOneDAllLanes() const { 1105 if (Kind != k_VectorListAllLanes) return false; 1106 return VectorList.Count == 1; 1107 } 1108 1109 bool isVecListTwoDAllLanes() const { 1110 if (Kind != k_VectorListAllLanes) return false; 1111 return VectorList.Count == 2; 1112 } 1113 1114 bool isVecListOneDByteIndexed() const { 1115 if (Kind != k_VectorListIndexed) return false; 1116 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 1117 } 1118 1119 bool isVectorIndex8() const { 1120 if (Kind != k_VectorIndex) return false; 1121 return VectorIndex.Val < 8; 1122 } 1123 bool isVectorIndex16() const { 1124 if (Kind != k_VectorIndex) return false; 1125 return VectorIndex.Val < 4; 1126 } 1127 bool isVectorIndex32() const { 1128 if (Kind != k_VectorIndex) return false; 1129 return VectorIndex.Val < 2; 1130 } 1131 1132 bool isNEONi8splat() const { 1133 if (Kind != k_Immediate) 1134 return false; 1135 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1136 // Must be a constant. 1137 if (!CE) return false; 1138 int64_t Value = CE->getValue(); 1139 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 1140 // value. 1141 return Value >= 0 && Value < 256; 1142 } 1143 1144 bool isNEONi16splat() const { 1145 if (Kind != k_Immediate) 1146 return false; 1147 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1148 // Must be a constant. 1149 if (!CE) return false; 1150 int64_t Value = CE->getValue(); 1151 // i16 value in the range [0,255] or [0x0100, 0xff00] 1152 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1153 } 1154 1155 bool isNEONi32splat() const { 1156 if (Kind != k_Immediate) 1157 return false; 1158 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1159 // Must be a constant. 1160 if (!CE) return false; 1161 int64_t Value = CE->getValue(); 1162 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 
1163 return (Value >= 0 && Value < 256) || 1164 (Value >= 0x0100 && Value <= 0xff00) || 1165 (Value >= 0x010000 && Value <= 0xff0000) || 1166 (Value >= 0x01000000 && Value <= 0xff000000); 1167 } 1168 1169 bool isNEONi32vmov() const { 1170 if (Kind != k_Immediate) 1171 return false; 1172 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1173 // Must be a constant. 1174 if (!CE) return false; 1175 int64_t Value = CE->getValue(); 1176 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1177 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1178 return (Value >= 0 && Value < 256) || 1179 (Value >= 0x0100 && Value <= 0xff00) || 1180 (Value >= 0x010000 && Value <= 0xff0000) || 1181 (Value >= 0x01000000 && Value <= 0xff000000) || 1182 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1183 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1184 } 1185 1186 bool isNEONi64splat() const { 1187 if (Kind != k_Immediate) 1188 return false; 1189 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1190 // Must be a constant. 1191 if (!CE) return false; 1192 uint64_t Value = CE->getValue(); 1193 // i64 value with each byte being either 0 or 0xff. 1194 for (unsigned i = 0; i < 8; ++i) 1195 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1196 return true; 1197 } 1198 1199 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1200 // Add as immediates when possible. Null MCExpr = 0. 
1201 if (Expr == 0) 1202 Inst.addOperand(MCOperand::CreateImm(0)); 1203 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1204 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1205 else 1206 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1207 } 1208 1209 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1210 assert(N == 2 && "Invalid number of operands!"); 1211 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1212 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; 1213 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1214 } 1215 1216 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1217 assert(N == 1 && "Invalid number of operands!"); 1218 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1219 } 1220 1221 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1222 assert(N == 1 && "Invalid number of operands!"); 1223 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1224 } 1225 1226 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1227 assert(N == 1 && "Invalid number of operands!"); 1228 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1229 } 1230 1231 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1232 assert(N == 1 && "Invalid number of operands!"); 1233 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1234 } 1235 1236 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1237 assert(N == 1 && "Invalid number of operands!"); 1238 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1239 } 1240 1241 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1242 assert(N == 1 && "Invalid number of operands!"); 1243 Inst.addOperand(MCOperand::CreateReg(getReg())); 1244 } 1245 1246 void addRegOperands(MCInst &Inst, unsigned N) const { 1247 assert(N == 1 && "Invalid number of operands!"); 1248 Inst.addOperand(MCOperand::CreateReg(getReg())); 1249 } 1250 1251 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1252 assert(N == 3 && 
"Invalid number of operands!"); 1253 assert(isRegShiftedReg() && 1254 "addRegShiftedRegOperands() on non RegShiftedReg!"); 1255 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1256 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1257 Inst.addOperand(MCOperand::CreateImm( 1258 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1259 } 1260 1261 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1262 assert(N == 2 && "Invalid number of operands!"); 1263 assert(isRegShiftedImm() && 1264 "addRegShiftedImmOperands() on non RegShiftedImm!"); 1265 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1266 Inst.addOperand(MCOperand::CreateImm( 1267 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1268 } 1269 1270 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1271 assert(N == 1 && "Invalid number of operands!"); 1272 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1273 ShifterImm.Imm)); 1274 } 1275 1276 void addRegListOperands(MCInst &Inst, unsigned N) const { 1277 assert(N == 1 && "Invalid number of operands!"); 1278 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1279 for (SmallVectorImpl<unsigned>::const_iterator 1280 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1281 Inst.addOperand(MCOperand::CreateReg(*I)); 1282 } 1283 1284 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1285 addRegListOperands(Inst, N); 1286 } 1287 1288 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1289 addRegListOperands(Inst, N); 1290 } 1291 1292 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1293 assert(N == 1 && "Invalid number of operands!"); 1294 // Encoded as val>>3. The printer handles display as 8, 16, 24. 
1295 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1296 } 1297 1298 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1299 assert(N == 1 && "Invalid number of operands!"); 1300 // Munge the lsb/width into a bitfield mask. 1301 unsigned lsb = Bitfield.LSB; 1302 unsigned width = Bitfield.Width; 1303 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 1304 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1305 (32 - (lsb + width))); 1306 Inst.addOperand(MCOperand::CreateImm(Mask)); 1307 } 1308 1309 void addImmOperands(MCInst &Inst, unsigned N) const { 1310 assert(N == 1 && "Invalid number of operands!"); 1311 addExpr(Inst, getImm()); 1312 } 1313 1314 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1315 assert(N == 1 && "Invalid number of operands!"); 1316 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1317 } 1318 1319 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1320 assert(N == 1 && "Invalid number of operands!"); 1321 // FIXME: We really want to scale the value here, but the LDRD/STRD 1322 // instruction don't encode operands that way yet. 1323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1324 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1325 } 1326 1327 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1328 assert(N == 1 && "Invalid number of operands!"); 1329 // The immediate is scaled by four in the encoding and is stored 1330 // in the MCInst as such. Lop off the low two bits here. 1331 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1332 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1333 } 1334 1335 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1336 assert(N == 1 && "Invalid number of operands!"); 1337 // The immediate is scaled by four in the encoding and is stored 1338 // in the MCInst as such. Lop off the low two bits here. 
1339 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1340 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1341 } 1342 1343 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1344 assert(N == 1 && "Invalid number of operands!"); 1345 // The constant encodes as the immediate-1, and we store in the instruction 1346 // the bits as encoded, so subtract off one here. 1347 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1348 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1349 } 1350 1351 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1352 assert(N == 1 && "Invalid number of operands!"); 1353 // The constant encodes as the immediate-1, and we store in the instruction 1354 // the bits as encoded, so subtract off one here. 1355 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1356 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1357 } 1358 1359 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1360 assert(N == 1 && "Invalid number of operands!"); 1361 // The constant encodes as the immediate, except for 32, which encodes as 1362 // zero. 1363 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1364 unsigned Imm = CE->getValue(); 1365 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1366 } 1367 1368 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1369 assert(N == 1 && "Invalid number of operands!"); 1370 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1371 // the instruction as well. 1372 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1373 int Val = CE->getValue(); 1374 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 1375 } 1376 1377 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 1378 assert(N == 1 && "Invalid number of operands!"); 1379 // The operand is actually a t2_so_imm, but we have its bitwise 1380 // negation in the assembly source, so twiddle it here. 
1381 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1382 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1383 } 1384 1385 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { 1386 assert(N == 1 && "Invalid number of operands!"); 1387 // The operand is actually a t2_so_imm, but we have its 1388 // negation in the assembly source, so twiddle it here. 1389 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1390 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1391 } 1392 1393 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 1394 assert(N == 1 && "Invalid number of operands!"); 1395 // The operand is actually a so_imm, but we have its bitwise 1396 // negation in the assembly source, so twiddle it here. 1397 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1398 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1399 } 1400 1401 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const { 1402 assert(N == 1 && "Invalid number of operands!"); 1403 // The operand is actually a so_imm, but we have its 1404 // negation in the assembly source, so twiddle it here. 
1405 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1406 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1407 } 1408 1409 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1410 assert(N == 1 && "Invalid number of operands!"); 1411 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1412 } 1413 1414 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1415 assert(N == 1 && "Invalid number of operands!"); 1416 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1417 } 1418 1419 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1420 assert(N == 2 && "Invalid number of operands!"); 1421 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1422 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1423 } 1424 1425 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1426 assert(N == 3 && "Invalid number of operands!"); 1427 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1428 if (!Memory.OffsetRegNum) { 1429 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1430 // Special case for #-0 1431 if (Val == INT32_MIN) Val = 0; 1432 if (Val < 0) Val = -Val; 1433 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1434 } else { 1435 // For register offset, we encode the shift type and negation flag 1436 // here. 1437 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1438 Memory.ShiftImm, Memory.ShiftType); 1439 } 1440 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1441 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1442 Inst.addOperand(MCOperand::CreateImm(Val)); 1443 } 1444 1445 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1446 assert(N == 2 && "Invalid number of operands!"); 1447 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1448 assert(CE && "non-constant AM2OffsetImm operand!"); 1449 int32_t Val = CE->getValue(); 1450 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1451 // Special case for #-0 1452 if (Val == INT32_MIN) Val = 0; 1453 if (Val < 0) Val = -Val; 1454 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1455 Inst.addOperand(MCOperand::CreateReg(0)); 1456 Inst.addOperand(MCOperand::CreateImm(Val)); 1457 } 1458 1459 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1460 assert(N == 3 && "Invalid number of operands!"); 1461 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1462 if (!Memory.OffsetRegNum) { 1463 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1464 // Special case for #-0 1465 if (Val == INT32_MIN) Val = 0; 1466 if (Val < 0) Val = -Val; 1467 Val = ARM_AM::getAM3Opc(AddSub, Val); 1468 } else { 1469 // For register offset, we encode the shift type and negation flag 1470 // here. 1471 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1472 } 1473 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1474 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1475 Inst.addOperand(MCOperand::CreateImm(Val)); 1476 } 1477 1478 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1479 assert(N == 2 && "Invalid number of operands!"); 1480 if (Kind == k_PostIndexRegister) { 1481 int32_t Val = 1482 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 1483 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1484 Inst.addOperand(MCOperand::CreateImm(Val)); 1485 return; 1486 } 1487 1488 // Constant offset. 1489 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1490 int32_t Val = CE->getValue(); 1491 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1492 // Special case for #-0 1493 if (Val == INT32_MIN) Val = 0; 1494 if (Val < 0) Val = -Val; 1495 Val = ARM_AM::getAM3Opc(AddSub, Val); 1496 Inst.addOperand(MCOperand::CreateReg(0)); 1497 Inst.addOperand(MCOperand::CreateImm(Val)); 1498 } 1499 1500 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1501 assert(N == 2 && "Invalid number of operands!"); 1502 // If we have an immediate that's not a constant, treat it as a label 1503 // reference needing a fixup. If it is a constant, it's something else 1504 // and we reject it. 1505 if (isImm()) { 1506 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1507 Inst.addOperand(MCOperand::CreateImm(0)); 1508 return; 1509 } 1510 1511 // The lower two bits are always zero and as such are not encoded. 1512 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1513 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1514 // Special case for #-0 1515 if (Val == INT32_MIN) Val = 0; 1516 if (Val < 0) Val = -Val; 1517 Val = ARM_AM::getAM5Opc(AddSub, Val); 1518 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1519 Inst.addOperand(MCOperand::CreateImm(Val)); 1520 } 1521 1522 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1523 assert(N == 2 && "Invalid number of operands!"); 1524 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1525 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1526 Inst.addOperand(MCOperand::CreateImm(Val)); 1527 } 1528 1529 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1530 assert(N == 2 && "Invalid number of operands!"); 1531 // The lower two bits are always zero and as such are not encoded. 1532 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1533 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1534 Inst.addOperand(MCOperand::CreateImm(Val)); 1535 } 1536 1537 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1538 assert(N == 2 && "Invalid number of operands!"); 1539 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1540 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1541 Inst.addOperand(MCOperand::CreateImm(Val)); 1542 } 1543 1544 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1545 addMemImm8OffsetOperands(Inst, N); 1546 } 1547 1548 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1549 addMemImm8OffsetOperands(Inst, N); 1550 } 1551 1552 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1553 assert(N == 2 && "Invalid number of operands!"); 1554 // If this is an immediate, it's a label reference. 1555 if (Kind == k_Immediate) { 1556 addExpr(Inst, getImm()); 1557 Inst.addOperand(MCOperand::CreateImm(0)); 1558 return; 1559 } 1560 1561 // Otherwise, it's a normal memory reg+offset. 1562 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1563 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1564 Inst.addOperand(MCOperand::CreateImm(Val)); 1565 } 1566 1567 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1568 assert(N == 2 && "Invalid number of operands!"); 1569 // If this is an immediate, it's a label reference. 1570 if (Kind == k_Immediate) { 1571 addExpr(Inst, getImm()); 1572 Inst.addOperand(MCOperand::CreateImm(0)); 1573 return; 1574 } 1575 1576 // Otherwise, it's a normal memory reg+offset. 1577 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1578 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1579 Inst.addOperand(MCOperand::CreateImm(Val)); 1580 } 1581 1582 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1583 assert(N == 2 && "Invalid number of operands!"); 1584 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1585 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1586 } 1587 1588 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1589 assert(N == 2 && "Invalid number of operands!"); 1590 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1591 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1592 } 1593 1594 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1595 assert(N == 3 && "Invalid number of operands!"); 1596 unsigned Val = 1597 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1598 Memory.ShiftImm, Memory.ShiftType); 1599 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1600 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1601 Inst.addOperand(MCOperand::CreateImm(Val)); 1602 } 1603 1604 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1605 assert(N == 3 && "Invalid number of operands!"); 1606 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1607 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1608 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1609 } 1610 1611 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1612 assert(N == 2 && "Invalid number of operands!"); 1613 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1614 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1615 } 1616 1617 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1618 assert(N == 2 && "Invalid number of operands!"); 1619 int64_t Val = Memory.OffsetImm ? 
                     (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb [Rn, #imm] with the offset encoded scaled by 2 (halfword multiples).
  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb [Rn, #imm] with an unscaled (byte) offset.
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb [SP, #imm] with the offset encoded scaled by 4.
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed 8-bit immediate: encoded as magnitude in the low bits with
  // the add/sub direction in bit 8. INT32_MIN (which has no positive
  // magnitude) is normalized to 0 first.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Post-indexed 8-bit immediate, scaled by 4: same encoding as above but
  // the magnitude is stored in word multiples.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Post-indexed register: the register plus an add/sub flag immediate.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // MSR mask operand (for MSR/MRS).
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  // Processor interrupt-flags operand (for CPS).
  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  // NEON vector list: emitted as the (pseudo) register covering the list.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  // NEON vector list with a lane index: register plus the lane number.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }

  // Scalar lane index for .8/.16/.32 element accesses; all three emit the
  // raw index (range checking happens during operand matching).
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned
                                N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Values >= 256 are a byte value in the high half of the i16; shift it
    // down and select the corresponding encoding control bits, otherwise
    // mark it as a low-byte splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Shift the byte value down to the low 8 bits and fold in control bits
    // identifying which byte of the i32 it occupies.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // Like the i32 splat case above, but the "byte in upper position with
    // ones below" (0xc00/0xd00) forms are also available for vmov.
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // One bit of the encoded immediate per byte of the 64-bit value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory methods: each allocates an ARMOperand of the given kind, fills
  // in its payload, and records the source location range. Operands with no
  // meaningful extent use the start location for both ends.

  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Token operands reference the original source text; no copy is made, so
  // the backing buffer must outlive the operand.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Register-list operand. The list kind (GPR/DPR/SPR) is deduced from the
  // class of the first register; the registers are sorted so the list is in
  // canonical (ascending enum) order regardless of source order.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
                                              SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
                                             unsigned Index, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // NOTE(review): the Ctx parameter is currently unused; kept for interface
  // stability with callers that pass getContext().
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // General memory operand; unused fields are passed as 0 by callers.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand
*CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

// Debug/diagnostic dump of an operand. One case per operand kind; the
// output format is ad hoc and only intended for human consumption.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Table indexed by the 4-bit IT mask; entry 0 (an empty mask) is not a
    // valid encoding but keeps the indexing simple.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset/shift/alignment details are
    // omitted from the dump.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): no opening '<' here, unlike the other cases — the dump
    // prints "post-idx register ...>".
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // Rotation is stored in multiples of 8 bits.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS <<
"<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

// MCTargetAsmParser entry point for register parsing. Returns 'true' (the
// error convention) when the current token is not a register name.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  StartLoc = Parser.getTok().getLoc();
  RegNo = tryParseRegister();
  EndLoc = Parser.getTok().getLoc();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1 (and no token is consumed).
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Registers are matched case-insensitively.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Note the lookup is
    // case-sensitive, unlike the built-in names above.
    StringMap<unsigned>::const_iterator Entry =
      RegisterReqs.find(Tok.getIdentifier());
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // Error() returns true, which converts to 1 here: a recoverable error.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit a register-shifted-register operand if a shift register was parsed
  // (including the implicit one for RRX is excluded here), otherwise a
  // register-shifted-immediate operand.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2331bool ARMAsmParser:: 2332tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2333 SMLoc S = Parser.getTok().getLoc(); 2334 int RegNo = tryParseRegister(); 2335 if (RegNo == -1) 2336 return true; 2337 2338 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2339 2340 const AsmToken &ExclaimTok = Parser.getTok(); 2341 if (ExclaimTok.is(AsmToken::Exclaim)) { 2342 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2343 ExclaimTok.getLoc())); 2344 Parser.Lex(); // Eat exclaim token 2345 return false; 2346 } 2347 2348 // Also check for an index operand. This is only legal for vector registers, 2349 // but that'll get caught OK in operand matching, so we don't need to 2350 // explicitly filter everything else out here. 2351 if (Parser.getTok().is(AsmToken::LBrac)) { 2352 SMLoc SIdx = Parser.getTok().getLoc(); 2353 Parser.Lex(); // Eat left bracket token. 2354 2355 const MCExpr *ImmVal; 2356 if (getParser().ParseExpression(ImmVal)) 2357 return MatchOperand_ParseFail; 2358 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2359 if (!MCE) { 2360 TokError("immediate value expected for vector index"); 2361 return MatchOperand_ParseFail; 2362 } 2363 2364 SMLoc E = Parser.getTok().getLoc(); 2365 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2366 Error(E, "']' expected"); 2367 return MatchOperand_ParseFail; 2368 } 2369 2370 Parser.Lex(); // Eat right bracket token. 2371 2372 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2373 SIdx, E, 2374 getContext())); 2375 } 2376 2377 return false; 2378} 2379 2380/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2381/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2382/// "c5", ... 2383static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2384 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2385 // but efficient. 
2386 switch (Name.size()) { 2387 default: break; 2388 case 2: 2389 if (Name[0] != CoprocOp) 2390 return -1; 2391 switch (Name[1]) { 2392 default: return -1; 2393 case '0': return 0; 2394 case '1': return 1; 2395 case '2': return 2; 2396 case '3': return 3; 2397 case '4': return 4; 2398 case '5': return 5; 2399 case '6': return 6; 2400 case '7': return 7; 2401 case '8': return 8; 2402 case '9': return 9; 2403 } 2404 break; 2405 case 3: 2406 if (Name[0] != CoprocOp || Name[1] != '1') 2407 return -1; 2408 switch (Name[2]) { 2409 default: return -1; 2410 case '0': return 10; 2411 case '1': return 11; 2412 case '2': return 12; 2413 case '3': return 13; 2414 case '4': return 14; 2415 case '5': return 15; 2416 } 2417 break; 2418 } 2419 2420 return -1; 2421} 2422 2423/// parseITCondCode - Try to parse a condition code for an IT instruction. 2424ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2425parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2426 SMLoc S = Parser.getTok().getLoc(); 2427 const AsmToken &Tok = Parser.getTok(); 2428 if (!Tok.is(AsmToken::Identifier)) 2429 return MatchOperand_NoMatch; 2430 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2431 .Case("eq", ARMCC::EQ) 2432 .Case("ne", ARMCC::NE) 2433 .Case("hs", ARMCC::HS) 2434 .Case("cs", ARMCC::HS) 2435 .Case("lo", ARMCC::LO) 2436 .Case("cc", ARMCC::LO) 2437 .Case("mi", ARMCC::MI) 2438 .Case("pl", ARMCC::PL) 2439 .Case("vs", ARMCC::VS) 2440 .Case("vc", ARMCC::VC) 2441 .Case("hi", ARMCC::HI) 2442 .Case("ls", ARMCC::LS) 2443 .Case("ge", ARMCC::GE) 2444 .Case("lt", ARMCC::LT) 2445 .Case("gt", ARMCC::GT) 2446 .Case("le", ARMCC::LE) 2447 .Case("al", ARMCC::AL) 2448 .Default(~0U); 2449 if (CC == ~0U) 2450 return MatchOperand_NoMatch; 2451 Parser.Lex(); // Eat the token. 2452 2453 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2454 2455 return MatchOperand_Success; 2456} 2457 2458/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Coprocessor numbers are spelled "p0".."p15".
  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}

/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Coprocessor registers are spelled "c0".."c15".
  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}

/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the '{'

  const MCExpr *Expr;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().ParseExpression(Expr)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  // The option must be a compile-time constant in [0, 255].
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();

  // Check for and consume the closing '}'
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return MatchOperand_ParseFail;
  SMLoc E = Parser.getTok().getLoc();
  Parser.Lex(); // Eat the '}'

  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  return MatchOperand_Success;
}

// For register list parsing, we need to map from raw GPR register numbering
// to the enumeration values. The enumeration values aren't sorted by
// register number due to our using "sp", "lr" and "pc" as canonical names.
static unsigned getNextRegister(unsigned Reg) {
  // If this is a GPR, we need to do it manually, otherwise we can rely
  // on the sort ordering of the enumeration since the other reg-classes
  // are sane.
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  // GPRs: explicit successor table (wraps PC -> R0).
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

// Return the low-subreg of a given Q register.
static unsigned getDRegFromQReg(unsigned QReg) {
  switch (QReg) {
  default: llvm_unreachable("expected a Q register!");
  case ARM::Q0:  return ARM::D0;
  case ARM::Q1:  return ARM::D2;
  case ARM::Q2:  return ARM::D4;
  case ARM::Q3:  return ARM::D6;
  case ARM::Q4:  return ARM::D8;
  case ARM::Q5:  return ARM::D10;
  case ARM::Q6:  return ARM::D12;
  case ARM::Q7:  return ARM::D14;
  case ARM::Q8:  return ARM::D16;
  case ARM::Q9:  return ARM::D18;
  case ARM::Q10: return ARM::D20;
  case ARM::Q11: return ARM::D22;
  case ARM::Q12: return ARM::D24;
  case ARM::Q13: return ARM::D26;
  case ARM::Q14: return ARM::D28;
  case ARM::Q15: return ARM::D30;
  }
}

/// Parse a register list.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
2586 int Reg = tryParseRegister(); 2587 if (Reg == -1) 2588 return Error(RegLoc, "register expected"); 2589 2590 // The reglist instructions have at most 16 registers, so reserve 2591 // space for that many. 2592 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2593 2594 // Allow Q regs and just interpret them as the two D sub-registers. 2595 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2596 Reg = getDRegFromQReg(Reg); 2597 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2598 ++Reg; 2599 } 2600 const MCRegisterClass *RC; 2601 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2602 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2603 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2604 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2605 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2606 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2607 else 2608 return Error(RegLoc, "invalid register in register list"); 2609 2610 // Store the register. 2611 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2612 2613 // This starts immediately after the first register token in the list, 2614 // so we can see either a comma or a minus (range separator) as a legal 2615 // next token. 2616 while (Parser.getTok().is(AsmToken::Comma) || 2617 Parser.getTok().is(AsmToken::Minus)) { 2618 if (Parser.getTok().is(AsmToken::Minus)) { 2619 Parser.Lex(); // Eat the minus. 2620 SMLoc EndLoc = Parser.getTok().getLoc(); 2621 int EndReg = tryParseRegister(); 2622 if (EndReg == -1) 2623 return Error(EndLoc, "register expected"); 2624 // Allow Q regs and just interpret them as the two D sub-registers. 2625 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2626 EndReg = getDRegFromQReg(EndReg) + 1; 2627 // If the register is the same as the start reg, there's nothing 2628 // more to do. 2629 if (Reg == EndReg) 2630 continue; 2631 // The register must be in the same register class as the first. 
2632 if (!RC->contains(EndReg)) 2633 return Error(EndLoc, "invalid register in register list"); 2634 // Ranges must go from low to high. 2635 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2636 return Error(EndLoc, "bad range in register list"); 2637 2638 // Add all the registers in the range to the register list. 2639 while (Reg != EndReg) { 2640 Reg = getNextRegister(Reg); 2641 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2642 } 2643 continue; 2644 } 2645 Parser.Lex(); // Eat the comma. 2646 RegLoc = Parser.getTok().getLoc(); 2647 int OldReg = Reg; 2648 const AsmToken RegTok = Parser.getTok(); 2649 Reg = tryParseRegister(); 2650 if (Reg == -1) 2651 return Error(RegLoc, "register expected"); 2652 // Allow Q regs and just interpret them as the two D sub-registers. 2653 bool isQReg = false; 2654 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2655 Reg = getDRegFromQReg(Reg); 2656 isQReg = true; 2657 } 2658 // The register must be in the same register class as the first. 2659 if (!RC->contains(Reg)) 2660 return Error(RegLoc, "invalid register in register list"); 2661 // List must be monotonically increasing. 2662 if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) 2663 return Error(RegLoc, "register list not in ascending order"); 2664 if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) { 2665 Warning(RegLoc, "duplicated register (" + RegTok.getString() + 2666 ") in register list"); 2667 continue; 2668 } 2669 // VFP register lists must also be contiguous. 2670 // It's OK to use the enumeration values directly here rather, as the 2671 // VFP register classes have the enum sorted properly. 
2672 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2673 Reg != OldReg + 1) 2674 return Error(RegLoc, "non-contiguous register range"); 2675 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2676 if (isQReg) 2677 Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc)); 2678 } 2679 2680 SMLoc E = Parser.getTok().getLoc(); 2681 if (Parser.getTok().isNot(AsmToken::RCurly)) 2682 return Error(E, "'}' expected"); 2683 Parser.Lex(); // Eat '}' token. 2684 2685 // Push the register list operand. 2686 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2687 2688 // The ARM system instruction variants for LDM/STM have a '^' token here. 2689 if (Parser.getTok().is(AsmToken::Caret)) { 2690 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc())); 2691 Parser.Lex(); // Eat '^' token. 2692 } 2693 2694 return false; 2695} 2696 2697// Helper function to parse the lane index for vector lists. 2698ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2699parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) { 2700 Index = 0; // Always return a defined index value. 2701 if (Parser.getTok().is(AsmToken::LBrac)) { 2702 Parser.Lex(); // Eat the '['. 2703 if (Parser.getTok().is(AsmToken::RBrac)) { 2704 // "Dn[]" is the 'all lanes' syntax. 2705 LaneKind = AllLanes; 2706 Parser.Lex(); // Eat the ']'. 2707 return MatchOperand_Success; 2708 } 2709 if (Parser.getTok().is(AsmToken::Integer)) { 2710 int64_t Val = Parser.getTok().getIntVal(); 2711 // Make this range check context sensitive for .8, .16, .32. 2712 if (Val < 0 && Val > 7) 2713 Error(Parser.getTok().getLoc(), "lane index out of range"); 2714 Index = Val; 2715 LaneKind = IndexedLane; 2716 Parser.Lex(); // Eat the token; 2717 if (Parser.getTok().isNot(AsmToken::RBrac)) 2718 Error(Parser.getTok().getLoc(), "']' expected"); 2719 Parser.Lex(); // Eat the ']'. 
2720 return MatchOperand_Success; 2721 } 2722 Error(Parser.getTok().getLoc(), "lane index must be empty or an integer"); 2723 return MatchOperand_ParseFail; 2724 } 2725 LaneKind = NoLanes; 2726 return MatchOperand_Success; 2727} 2728 2729// parse a vector register list 2730ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2731parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2732 VectorLaneTy LaneKind; 2733 unsigned LaneIndex; 2734 SMLoc S = Parser.getTok().getLoc(); 2735 // As an extension (to match gas), support a plain D register or Q register 2736 // (without encosing curly braces) as a single or double entry list, 2737 // respectively. 2738 if (Parser.getTok().is(AsmToken::Identifier)) { 2739 int Reg = tryParseRegister(); 2740 if (Reg == -1) 2741 return MatchOperand_NoMatch; 2742 SMLoc E = Parser.getTok().getLoc(); 2743 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) { 2744 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2745 if (Res != MatchOperand_Success) 2746 return Res; 2747 switch (LaneKind) { 2748 default: 2749 assert(0 && "unexpected lane kind!"); 2750 case NoLanes: 2751 E = Parser.getTok().getLoc(); 2752 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E)); 2753 break; 2754 case AllLanes: 2755 E = Parser.getTok().getLoc(); 2756 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E)); 2757 break; 2758 case IndexedLane: 2759 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1, 2760 LaneIndex, S,E)); 2761 break; 2762 } 2763 return MatchOperand_Success; 2764 } 2765 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2766 Reg = getDRegFromQReg(Reg); 2767 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2768 if (Res != MatchOperand_Success) 2769 return Res; 2770 switch (LaneKind) { 2771 default: 2772 assert(0 && "unexpected lane kind!"); 2773 case NoLanes: 2774 E = Parser.getTok().getLoc(); 2775 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, 
E)); 2776 break; 2777 case AllLanes: 2778 E = Parser.getTok().getLoc(); 2779 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E)); 2780 break; 2781 case IndexedLane: 2782 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2, 2783 LaneIndex, S,E)); 2784 break; 2785 } 2786 return MatchOperand_Success; 2787 } 2788 Error(S, "vector register expected"); 2789 return MatchOperand_ParseFail; 2790 } 2791 2792 if (Parser.getTok().isNot(AsmToken::LCurly)) 2793 return MatchOperand_NoMatch; 2794 2795 Parser.Lex(); // Eat '{' token. 2796 SMLoc RegLoc = Parser.getTok().getLoc(); 2797 2798 int Reg = tryParseRegister(); 2799 if (Reg == -1) { 2800 Error(RegLoc, "register expected"); 2801 return MatchOperand_ParseFail; 2802 } 2803 unsigned Count = 1; 2804 unsigned FirstReg = Reg; 2805 // The list is of D registers, but we also allow Q regs and just interpret 2806 // them as the two D sub-registers. 2807 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2808 FirstReg = Reg = getDRegFromQReg(Reg); 2809 ++Reg; 2810 ++Count; 2811 } 2812 if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success) 2813 return MatchOperand_ParseFail; 2814 2815 while (Parser.getTok().is(AsmToken::Comma) || 2816 Parser.getTok().is(AsmToken::Minus)) { 2817 if (Parser.getTok().is(AsmToken::Minus)) { 2818 Parser.Lex(); // Eat the minus. 2819 SMLoc EndLoc = Parser.getTok().getLoc(); 2820 int EndReg = tryParseRegister(); 2821 if (EndReg == -1) { 2822 Error(EndLoc, "register expected"); 2823 return MatchOperand_ParseFail; 2824 } 2825 // Allow Q regs and just interpret them as the two D sub-registers. 2826 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2827 EndReg = getDRegFromQReg(EndReg) + 1; 2828 // If the register is the same as the start reg, there's nothing 2829 // more to do. 2830 if (Reg == EndReg) 2831 continue; 2832 // The register must be in the same register class as the first. 
2833 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) { 2834 Error(EndLoc, "invalid register in register list"); 2835 return MatchOperand_ParseFail; 2836 } 2837 // Ranges must go from low to high. 2838 if (Reg > EndReg) { 2839 Error(EndLoc, "bad range in register list"); 2840 return MatchOperand_ParseFail; 2841 } 2842 // Parse the lane specifier if present. 2843 VectorLaneTy NextLaneKind; 2844 unsigned NextLaneIndex; 2845 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2846 return MatchOperand_ParseFail; 2847 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2848 Error(EndLoc, "mismatched lane index in register list"); 2849 return MatchOperand_ParseFail; 2850 } 2851 EndLoc = Parser.getTok().getLoc(); 2852 2853 // Add all the registers in the range to the register list. 2854 Count += EndReg - Reg; 2855 Reg = EndReg; 2856 continue; 2857 } 2858 Parser.Lex(); // Eat the comma. 2859 RegLoc = Parser.getTok().getLoc(); 2860 int OldReg = Reg; 2861 Reg = tryParseRegister(); 2862 if (Reg == -1) { 2863 Error(RegLoc, "register expected"); 2864 return MatchOperand_ParseFail; 2865 } 2866 // vector register lists must be contiguous. 2867 // It's OK to use the enumeration values directly here rather, as the 2868 // VFP register classes have the enum sorted properly. 2869 // 2870 // The list is of D registers, but we also allow Q regs and just interpret 2871 // them as the two D sub-registers. 2872 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2873 Reg = getDRegFromQReg(Reg); 2874 if (Reg != OldReg + 1) { 2875 Error(RegLoc, "non-contiguous register range"); 2876 return MatchOperand_ParseFail; 2877 } 2878 ++Reg; 2879 Count += 2; 2880 // Parse the lane specifier if present. 
2881 VectorLaneTy NextLaneKind; 2882 unsigned NextLaneIndex; 2883 SMLoc EndLoc = Parser.getTok().getLoc(); 2884 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2885 return MatchOperand_ParseFail; 2886 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2887 Error(EndLoc, "mismatched lane index in register list"); 2888 return MatchOperand_ParseFail; 2889 } 2890 continue; 2891 } 2892 // Normal D register. Just check that it's contiguous and keep going. 2893 if (Reg != OldReg + 1) { 2894 Error(RegLoc, "non-contiguous register range"); 2895 return MatchOperand_ParseFail; 2896 } 2897 ++Count; 2898 // Parse the lane specifier if present. 2899 VectorLaneTy NextLaneKind; 2900 unsigned NextLaneIndex; 2901 SMLoc EndLoc = Parser.getTok().getLoc(); 2902 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2903 return MatchOperand_ParseFail; 2904 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2905 Error(EndLoc, "mismatched lane index in register list"); 2906 return MatchOperand_ParseFail; 2907 } 2908 } 2909 2910 SMLoc E = Parser.getTok().getLoc(); 2911 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2912 Error(E, "'}' expected"); 2913 return MatchOperand_ParseFail; 2914 } 2915 Parser.Lex(); // Eat '}' token. 2916 2917 switch (LaneKind) { 2918 default: 2919 assert(0 && "unexpected lane kind in register list."); 2920 case NoLanes: 2921 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2922 break; 2923 case AllLanes: 2924 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count, 2925 S, E)); 2926 break; 2927 case IndexedLane: 2928 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count, 2929 LaneIndex, S, E)); 2930 break; 2931 } 2932 return MatchOperand_Success; 2933} 2934 2935/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 
2936ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2937parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2938 SMLoc S = Parser.getTok().getLoc(); 2939 const AsmToken &Tok = Parser.getTok(); 2940 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2941 StringRef OptStr = Tok.getString(); 2942 2943 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2944 .Case("sy", ARM_MB::SY) 2945 .Case("st", ARM_MB::ST) 2946 .Case("sh", ARM_MB::ISH) 2947 .Case("ish", ARM_MB::ISH) 2948 .Case("shst", ARM_MB::ISHST) 2949 .Case("ishst", ARM_MB::ISHST) 2950 .Case("nsh", ARM_MB::NSH) 2951 .Case("un", ARM_MB::NSH) 2952 .Case("nshst", ARM_MB::NSHST) 2953 .Case("unst", ARM_MB::NSHST) 2954 .Case("osh", ARM_MB::OSH) 2955 .Case("oshst", ARM_MB::OSHST) 2956 .Default(~0U); 2957 2958 if (Opt == ~0U) 2959 return MatchOperand_NoMatch; 2960 2961 Parser.Lex(); // Eat identifier token. 2962 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2963 return MatchOperand_Success; 2964} 2965 2966/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2967ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2968parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2969 SMLoc S = Parser.getTok().getLoc(); 2970 const AsmToken &Tok = Parser.getTok(); 2971 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2972 StringRef IFlagsStr = Tok.getString(); 2973 2974 // An iflags string of "none" is interpreted to mean that none of the AIF 2975 // bits are set. Not a terribly useful instruction, but a valid encoding. 
2976 unsigned IFlags = 0; 2977 if (IFlagsStr != "none") { 2978 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2979 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2980 .Case("a", ARM_PROC::A) 2981 .Case("i", ARM_PROC::I) 2982 .Case("f", ARM_PROC::F) 2983 .Default(~0U); 2984 2985 // If some specific iflag is already set, it means that some letter is 2986 // present more than once, this is not acceptable. 2987 if (Flag == ~0U || (IFlags & Flag)) 2988 return MatchOperand_NoMatch; 2989 2990 IFlags |= Flag; 2991 } 2992 } 2993 2994 Parser.Lex(); // Eat identifier token. 2995 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2996 return MatchOperand_Success; 2997} 2998 2999/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 3000ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3001parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3002 SMLoc S = Parser.getTok().getLoc(); 3003 const AsmToken &Tok = Parser.getTok(); 3004 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 3005 StringRef Mask = Tok.getString(); 3006 3007 if (isMClass()) { 3008 // See ARMv6-M 10.1.1 3009 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 3010 .Case("apsr", 0) 3011 .Case("iapsr", 1) 3012 .Case("eapsr", 2) 3013 .Case("xpsr", 3) 3014 .Case("ipsr", 5) 3015 .Case("epsr", 6) 3016 .Case("iepsr", 7) 3017 .Case("msp", 8) 3018 .Case("psp", 9) 3019 .Case("primask", 16) 3020 .Case("basepri", 17) 3021 .Case("basepri_max", 18) 3022 .Case("faultmask", 19) 3023 .Case("control", 20) 3024 .Default(~0U); 3025 3026 if (FlagsVal == ~0U) 3027 return MatchOperand_NoMatch; 3028 3029 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 3030 // basepri, basepri_max and faultmask only valid for V7m. 3031 return MatchOperand_NoMatch; 3032 3033 Parser.Lex(); // Eat identifier token. 
3034 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3035 return MatchOperand_Success; 3036 } 3037 3038 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 3039 size_t Start = 0, Next = Mask.find('_'); 3040 StringRef Flags = ""; 3041 std::string SpecReg = Mask.slice(Start, Next).lower(); 3042 if (Next != StringRef::npos) 3043 Flags = Mask.slice(Next+1, Mask.size()); 3044 3045 // FlagsVal contains the complete mask: 3046 // 3-0: Mask 3047 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3048 unsigned FlagsVal = 0; 3049 3050 if (SpecReg == "apsr") { 3051 FlagsVal = StringSwitch<unsigned>(Flags) 3052 .Case("nzcvq", 0x8) // same as CPSR_f 3053 .Case("g", 0x4) // same as CPSR_s 3054 .Case("nzcvqg", 0xc) // same as CPSR_fs 3055 .Default(~0U); 3056 3057 if (FlagsVal == ~0U) { 3058 if (!Flags.empty()) 3059 return MatchOperand_NoMatch; 3060 else 3061 FlagsVal = 8; // No flag 3062 } 3063 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 3064 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 3065 Flags = "fc"; 3066 for (int i = 0, e = Flags.size(); i != e; ++i) { 3067 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 3068 .Case("c", 1) 3069 .Case("x", 2) 3070 .Case("s", 4) 3071 .Case("f", 8) 3072 .Default(~0U); 3073 3074 // If some specific flag is already set, it means that some letter is 3075 // present more than once, this is not acceptable. 3076 if (FlagsVal == ~0U || (FlagsVal & Flag)) 3077 return MatchOperand_NoMatch; 3078 FlagsVal |= Flag; 3079 } 3080 } else // No match for special register. 3081 return MatchOperand_NoMatch; 3082 3083 // Special register without flags is NOT equivalent to "fc" flags. 3084 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 3085 // two lines would enable gas compatibility at the expense of breaking 3086 // round-tripping. 
3087 // 3088 // if (!FlagsVal) 3089 // FlagsVal = 0x9; 3090 3091 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3092 if (SpecReg == "spsr") 3093 FlagsVal |= 16; 3094 3095 Parser.Lex(); // Eat identifier token. 3096 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3097 return MatchOperand_Success; 3098} 3099 3100ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3101parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 3102 int Low, int High) { 3103 const AsmToken &Tok = Parser.getTok(); 3104 if (Tok.isNot(AsmToken::Identifier)) { 3105 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3106 return MatchOperand_ParseFail; 3107 } 3108 StringRef ShiftName = Tok.getString(); 3109 std::string LowerOp = Op.lower(); 3110 std::string UpperOp = Op.upper(); 3111 if (ShiftName != LowerOp && ShiftName != UpperOp) { 3112 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3113 return MatchOperand_ParseFail; 3114 } 3115 Parser.Lex(); // Eat shift type token. 3116 3117 // There must be a '#' and a shift amount. 3118 if (Parser.getTok().isNot(AsmToken::Hash) && 3119 Parser.getTok().isNot(AsmToken::Dollar)) { 3120 Error(Parser.getTok().getLoc(), "'#' expected"); 3121 return MatchOperand_ParseFail; 3122 } 3123 Parser.Lex(); // Eat hash token. 
3124 3125 const MCExpr *ShiftAmount; 3126 SMLoc Loc = Parser.getTok().getLoc(); 3127 if (getParser().ParseExpression(ShiftAmount)) { 3128 Error(Loc, "illegal expression"); 3129 return MatchOperand_ParseFail; 3130 } 3131 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3132 if (!CE) { 3133 Error(Loc, "constant expression expected"); 3134 return MatchOperand_ParseFail; 3135 } 3136 int Val = CE->getValue(); 3137 if (Val < Low || Val > High) { 3138 Error(Loc, "immediate value out of range"); 3139 return MatchOperand_ParseFail; 3140 } 3141 3142 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 3143 3144 return MatchOperand_Success; 3145} 3146 3147ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3148parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3149 const AsmToken &Tok = Parser.getTok(); 3150 SMLoc S = Tok.getLoc(); 3151 if (Tok.isNot(AsmToken::Identifier)) { 3152 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3153 return MatchOperand_ParseFail; 3154 } 3155 int Val = StringSwitch<int>(Tok.getString()) 3156 .Case("be", 1) 3157 .Case("le", 0) 3158 .Default(-1); 3159 Parser.Lex(); // Eat the token. 3160 3161 if (Val == -1) { 3162 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3163 return MatchOperand_ParseFail; 3164 } 3165 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3166 getContext()), 3167 S, Parser.getTok().getLoc())); 3168 return MatchOperand_Success; 3169} 3170 3171/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3172/// instructions. Legal values are: 3173/// lsl #n 'n' in [0,31] 3174/// asr #n 'n' in [1,32] 3175/// n == 32 encoded as n == 0. 
3176ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3177parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3178 const AsmToken &Tok = Parser.getTok(); 3179 SMLoc S = Tok.getLoc(); 3180 if (Tok.isNot(AsmToken::Identifier)) { 3181 Error(S, "shift operator 'asr' or 'lsl' expected"); 3182 return MatchOperand_ParseFail; 3183 } 3184 StringRef ShiftName = Tok.getString(); 3185 bool isASR; 3186 if (ShiftName == "lsl" || ShiftName == "LSL") 3187 isASR = false; 3188 else if (ShiftName == "asr" || ShiftName == "ASR") 3189 isASR = true; 3190 else { 3191 Error(S, "shift operator 'asr' or 'lsl' expected"); 3192 return MatchOperand_ParseFail; 3193 } 3194 Parser.Lex(); // Eat the operator. 3195 3196 // A '#' and a shift amount. 3197 if (Parser.getTok().isNot(AsmToken::Hash) && 3198 Parser.getTok().isNot(AsmToken::Dollar)) { 3199 Error(Parser.getTok().getLoc(), "'#' expected"); 3200 return MatchOperand_ParseFail; 3201 } 3202 Parser.Lex(); // Eat hash token. 3203 3204 const MCExpr *ShiftAmount; 3205 SMLoc E = Parser.getTok().getLoc(); 3206 if (getParser().ParseExpression(ShiftAmount)) { 3207 Error(E, "malformed shift expression"); 3208 return MatchOperand_ParseFail; 3209 } 3210 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3211 if (!CE) { 3212 Error(E, "shift amount must be an immediate"); 3213 return MatchOperand_ParseFail; 3214 } 3215 3216 int64_t Val = CE->getValue(); 3217 if (isASR) { 3218 // Shift amount must be in [1,32] 3219 if (Val < 1 || Val > 32) { 3220 Error(E, "'asr' shift amount must be in range [1,32]"); 3221 return MatchOperand_ParseFail; 3222 } 3223 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3224 if (isThumb() && Val == 32) { 3225 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 3226 return MatchOperand_ParseFail; 3227 } 3228 if (Val == 32) Val = 0; 3229 } else { 3230 // Shift amount must be in [1,32] 3231 if (Val < 0 || Val > 31) { 3232 Error(E, "'lsr' shift amount must be in range [0,31]"); 3233 return MatchOperand_ParseFail; 3234 } 3235 } 3236 3237 E = Parser.getTok().getLoc(); 3238 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 3239 3240 return MatchOperand_Success; 3241} 3242 3243/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3244/// of instructions. Legal values are: 3245/// ror #n 'n' in {0, 8, 16, 24} 3246ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3247parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3248 const AsmToken &Tok = Parser.getTok(); 3249 SMLoc S = Tok.getLoc(); 3250 if (Tok.isNot(AsmToken::Identifier)) 3251 return MatchOperand_NoMatch; 3252 StringRef ShiftName = Tok.getString(); 3253 if (ShiftName != "ror" && ShiftName != "ROR") 3254 return MatchOperand_NoMatch; 3255 Parser.Lex(); // Eat the operator. 3256 3257 // A '#' and a rotate amount. 3258 if (Parser.getTok().isNot(AsmToken::Hash) && 3259 Parser.getTok().isNot(AsmToken::Dollar)) { 3260 Error(Parser.getTok().getLoc(), "'#' expected"); 3261 return MatchOperand_ParseFail; 3262 } 3263 Parser.Lex(); // Eat hash token. 
3264 3265 const MCExpr *ShiftAmount; 3266 SMLoc E = Parser.getTok().getLoc(); 3267 if (getParser().ParseExpression(ShiftAmount)) { 3268 Error(E, "malformed rotate expression"); 3269 return MatchOperand_ParseFail; 3270 } 3271 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3272 if (!CE) { 3273 Error(E, "rotate amount must be an immediate"); 3274 return MatchOperand_ParseFail; 3275 } 3276 3277 int64_t Val = CE->getValue(); 3278 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 3279 // normally, zero is represented in asm by omitting the rotate operand 3280 // entirely. 3281 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 3282 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 3283 return MatchOperand_ParseFail; 3284 } 3285 3286 E = Parser.getTok().getLoc(); 3287 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 3288 3289 return MatchOperand_Success; 3290} 3291 3292ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3293parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3294 SMLoc S = Parser.getTok().getLoc(); 3295 // The bitfield descriptor is really two operands, the LSB and the width. 3296 if (Parser.getTok().isNot(AsmToken::Hash) && 3297 Parser.getTok().isNot(AsmToken::Dollar)) { 3298 Error(Parser.getTok().getLoc(), "'#' expected"); 3299 return MatchOperand_ParseFail; 3300 } 3301 Parser.Lex(); // Eat hash token. 
3302 3303 const MCExpr *LSBExpr; 3304 SMLoc E = Parser.getTok().getLoc(); 3305 if (getParser().ParseExpression(LSBExpr)) { 3306 Error(E, "malformed immediate expression"); 3307 return MatchOperand_ParseFail; 3308 } 3309 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 3310 if (!CE) { 3311 Error(E, "'lsb' operand must be an immediate"); 3312 return MatchOperand_ParseFail; 3313 } 3314 3315 int64_t LSB = CE->getValue(); 3316 // The LSB must be in the range [0,31] 3317 if (LSB < 0 || LSB > 31) { 3318 Error(E, "'lsb' operand must be in the range [0,31]"); 3319 return MatchOperand_ParseFail; 3320 } 3321 E = Parser.getTok().getLoc(); 3322 3323 // Expect another immediate operand. 3324 if (Parser.getTok().isNot(AsmToken::Comma)) { 3325 Error(Parser.getTok().getLoc(), "too few operands"); 3326 return MatchOperand_ParseFail; 3327 } 3328 Parser.Lex(); // Eat hash token. 3329 if (Parser.getTok().isNot(AsmToken::Hash) && 3330 Parser.getTok().isNot(AsmToken::Dollar)) { 3331 Error(Parser.getTok().getLoc(), "'#' expected"); 3332 return MatchOperand_ParseFail; 3333 } 3334 Parser.Lex(); // Eat hash token. 
3335 3336 const MCExpr *WidthExpr; 3337 if (getParser().ParseExpression(WidthExpr)) { 3338 Error(E, "malformed immediate expression"); 3339 return MatchOperand_ParseFail; 3340 } 3341 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3342 if (!CE) { 3343 Error(E, "'width' operand must be an immediate"); 3344 return MatchOperand_ParseFail; 3345 } 3346 3347 int64_t Width = CE->getValue(); 3348 // The LSB must be in the range [1,32-lsb] 3349 if (Width < 1 || Width > 32 - LSB) { 3350 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3351 return MatchOperand_ParseFail; 3352 } 3353 E = Parser.getTok().getLoc(); 3354 3355 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3356 3357 return MatchOperand_Success; 3358} 3359 3360ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3361parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3362 // Check for a post-index addressing register operand. Specifically: 3363 // postidx_reg := '+' register {, shift} 3364 // | '-' register {, shift} 3365 // | register {, shift} 3366 3367 // This method must return MatchOperand_NoMatch without consuming any tokens 3368 // in the case where there is no match, as other alternatives take other 3369 // parse methods. 3370 AsmToken Tok = Parser.getTok(); 3371 SMLoc S = Tok.getLoc(); 3372 bool haveEaten = false; 3373 bool isAdd = true; 3374 int Reg = -1; 3375 if (Tok.is(AsmToken::Plus)) { 3376 Parser.Lex(); // Eat the '+' token. 3377 haveEaten = true; 3378 } else if (Tok.is(AsmToken::Minus)) { 3379 Parser.Lex(); // Eat the '-' token. 
3380 isAdd = false; 3381 haveEaten = true; 3382 } 3383 if (Parser.getTok().is(AsmToken::Identifier)) 3384 Reg = tryParseRegister(); 3385 if (Reg == -1) { 3386 if (!haveEaten) 3387 return MatchOperand_NoMatch; 3388 Error(Parser.getTok().getLoc(), "register expected"); 3389 return MatchOperand_ParseFail; 3390 } 3391 SMLoc E = Parser.getTok().getLoc(); 3392 3393 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3394 unsigned ShiftImm = 0; 3395 if (Parser.getTok().is(AsmToken::Comma)) { 3396 Parser.Lex(); // Eat the ','. 3397 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3398 return MatchOperand_ParseFail; 3399 } 3400 3401 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3402 ShiftImm, S, E)); 3403 3404 return MatchOperand_Success; 3405} 3406 3407ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3408parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3409 // Check for a post-index addressing register operand. Specifically: 3410 // am3offset := '+' register 3411 // | '-' register 3412 // | register 3413 // | # imm 3414 // | # + imm 3415 // | # - imm 3416 3417 // This method must return MatchOperand_NoMatch without consuming any tokens 3418 // in the case where there is no match, as other alternatives take other 3419 // parse methods. 3420 AsmToken Tok = Parser.getTok(); 3421 SMLoc S = Tok.getLoc(); 3422 3423 // Do immediates first, as we always parse those if we have a '#'. 3424 if (Parser.getTok().is(AsmToken::Hash) || 3425 Parser.getTok().is(AsmToken::Dollar)) { 3426 Parser.Lex(); // Eat the '#'. 3427 // Explicitly look for a '-', as we need to encode negative zero 3428 // differently. 
3429 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3430 const MCExpr *Offset; 3431 if (getParser().ParseExpression(Offset)) 3432 return MatchOperand_ParseFail; 3433 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3434 if (!CE) { 3435 Error(S, "constant expression expected"); 3436 return MatchOperand_ParseFail; 3437 } 3438 SMLoc E = Tok.getLoc(); 3439 // Negative zero is encoded as the flag value INT32_MIN. 3440 int32_t Val = CE->getValue(); 3441 if (isNegative && Val == 0) 3442 Val = INT32_MIN; 3443 3444 Operands.push_back( 3445 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3446 3447 return MatchOperand_Success; 3448 } 3449 3450 3451 bool haveEaten = false; 3452 bool isAdd = true; 3453 int Reg = -1; 3454 if (Tok.is(AsmToken::Plus)) { 3455 Parser.Lex(); // Eat the '+' token. 3456 haveEaten = true; 3457 } else if (Tok.is(AsmToken::Minus)) { 3458 Parser.Lex(); // Eat the '-' token. 3459 isAdd = false; 3460 haveEaten = true; 3461 } 3462 if (Parser.getTok().is(AsmToken::Identifier)) 3463 Reg = tryParseRegister(); 3464 if (Reg == -1) { 3465 if (!haveEaten) 3466 return MatchOperand_NoMatch; 3467 Error(Parser.getTok().getLoc(), "register expected"); 3468 return MatchOperand_ParseFail; 3469 } 3470 SMLoc E = Parser.getTok().getLoc(); 3471 3472 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3473 0, S, E)); 3474 3475 return MatchOperand_Success; 3476} 3477 3478/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3479/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3480/// when they refer multiple MIOperands inside a single one. 3481bool ARMAsmParser:: 3482cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3483 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3484 // Rt, Rt2 3485 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3486 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3487 // Create a writeback register dummy placeholder. 
3488 Inst.addOperand(MCOperand::CreateReg(0)); 3489 // addr 3490 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3491 // pred 3492 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3493 return true; 3494} 3495 3496/// cvtT2StrdPre - Convert parsed operands to MCInst. 3497/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3498/// when they refer multiple MIOperands inside a single one. 3499bool ARMAsmParser:: 3500cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3501 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3502 // Create a writeback register dummy placeholder. 3503 Inst.addOperand(MCOperand::CreateReg(0)); 3504 // Rt, Rt2 3505 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3506 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3507 // addr 3508 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3509 // pred 3510 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3511 return true; 3512} 3513 3514/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3515/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3516/// when they refer multiple MIOperands inside a single one. 3517bool ARMAsmParser:: 3518cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3519 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3520 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3521 3522 // Create a writeback register dummy placeholder. 3523 Inst.addOperand(MCOperand::CreateImm(0)); 3524 3525 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3526 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3527 return true; 3528} 3529 3530/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3531/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3532/// when they refer multiple MIOperands inside a single one. 
3533bool ARMAsmParser:: 3534cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3535 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3536 // Create a writeback register dummy placeholder. 3537 Inst.addOperand(MCOperand::CreateImm(0)); 3538 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3539 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3540 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3541 return true; 3542} 3543 3544/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3545/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3546/// when they refer multiple MIOperands inside a single one. 3547bool ARMAsmParser:: 3548cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3549 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3550 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3551 3552 // Create a writeback register dummy placeholder. 3553 Inst.addOperand(MCOperand::CreateImm(0)); 3554 3555 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3556 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3557 return true; 3558} 3559 3560/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3561/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3562/// when they refer multiple MIOperands inside a single one. 3563bool ARMAsmParser:: 3564cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3565 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3566 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3567 3568 // Create a writeback register dummy placeholder. 3569 Inst.addOperand(MCOperand::CreateImm(0)); 3570 3571 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3572 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3573 return true; 3574} 3575 3576 3577/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3578/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3579/// when they refer multiple MIOperands inside a single one. 3580bool ARMAsmParser:: 3581cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3582 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3583 // Create a writeback register dummy placeholder. 3584 Inst.addOperand(MCOperand::CreateImm(0)); 3585 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3586 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3587 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3588 return true; 3589} 3590 3591/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3592/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3593/// when they refer multiple MIOperands inside a single one. 3594bool ARMAsmParser:: 3595cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3596 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3597 // Create a writeback register dummy placeholder. 3598 Inst.addOperand(MCOperand::CreateImm(0)); 3599 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3600 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3601 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3602 return true; 3603} 3604 3605/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3606/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3607/// when they refer multiple MIOperands inside a single one. 3608bool ARMAsmParser:: 3609cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3610 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3611 // Create a writeback register dummy placeholder. 
3612 Inst.addOperand(MCOperand::CreateImm(0)); 3613 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3614 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3615 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3616 return true; 3617} 3618 3619/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3620/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3621/// when they refer multiple MIOperands inside a single one. 3622bool ARMAsmParser:: 3623cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3624 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3625 // Rt 3626 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3627 // Create a writeback register dummy placeholder. 3628 Inst.addOperand(MCOperand::CreateImm(0)); 3629 // addr 3630 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3631 // offset 3632 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3633 // pred 3634 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3635 return true; 3636} 3637 3638/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3639/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3640/// when they refer multiple MIOperands inside a single one. 3641bool ARMAsmParser:: 3642cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3643 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3644 // Rt 3645 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3646 // Create a writeback register dummy placeholder. 3647 Inst.addOperand(MCOperand::CreateImm(0)); 3648 // addr 3649 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3650 // offset 3651 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3652 // pred 3653 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3654 return true; 3655} 3656 3657/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3658/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3659/// when they refer multiple MIOperands inside a single one. 3660bool ARMAsmParser:: 3661cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3662 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3663 // Create a writeback register dummy placeholder. 3664 Inst.addOperand(MCOperand::CreateImm(0)); 3665 // Rt 3666 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3667 // addr 3668 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3669 // offset 3670 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3671 // pred 3672 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3673 return true; 3674} 3675 3676/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3677/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3678/// when they refer multiple MIOperands inside a single one. 3679bool ARMAsmParser:: 3680cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3681 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3682 // Create a writeback register dummy placeholder. 3683 Inst.addOperand(MCOperand::CreateImm(0)); 3684 // Rt 3685 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3686 // addr 3687 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3688 // offset 3689 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3690 // pred 3691 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3692 return true; 3693} 3694 3695/// cvtLdrdPre - Convert parsed operands to MCInst. 3696/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3697/// when they refer multiple MIOperands inside a single one. 
3698bool ARMAsmParser:: 3699cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3700 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3701 // Rt, Rt2 3702 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3703 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3704 // Create a writeback register dummy placeholder. 3705 Inst.addOperand(MCOperand::CreateImm(0)); 3706 // addr 3707 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3708 // pred 3709 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3710 return true; 3711} 3712 3713/// cvtStrdPre - Convert parsed operands to MCInst. 3714/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3715/// when they refer multiple MIOperands inside a single one. 3716bool ARMAsmParser:: 3717cvtStrdPre(MCInst &Inst, unsigned Opcode, 3718 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3719 // Create a writeback register dummy placeholder. 3720 Inst.addOperand(MCOperand::CreateImm(0)); 3721 // Rt, Rt2 3722 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3723 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3724 // addr 3725 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3726 // pred 3727 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3728 return true; 3729} 3730 3731/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3732/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3733/// when they refer multiple MIOperands inside a single one. 3734bool ARMAsmParser:: 3735cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3736 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3737 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3738 // Create a writeback register dummy placeholder. 
3739 Inst.addOperand(MCOperand::CreateImm(0)); 3740 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3741 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3742 return true; 3743} 3744 3745/// cvtThumbMultiple- Convert parsed operands to MCInst. 3746/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3747/// when they refer multiple MIOperands inside a single one. 3748bool ARMAsmParser:: 3749cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3750 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3751 // The second source operand must be the same register as the destination 3752 // operand. 3753 if (Operands.size() == 6 && 3754 (((ARMOperand*)Operands[3])->getReg() != 3755 ((ARMOperand*)Operands[5])->getReg()) && 3756 (((ARMOperand*)Operands[3])->getReg() != 3757 ((ARMOperand*)Operands[4])->getReg())) { 3758 Error(Operands[3]->getStartLoc(), 3759 "destination register must match source register"); 3760 return false; 3761 } 3762 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3763 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3764 // If we have a three-operand form, make sure to set Rn to be the operand 3765 // that isn't the same as Rd. 3766 unsigned RegOp = 4; 3767 if (Operands.size() == 6 && 3768 ((ARMOperand*)Operands[4])->getReg() == 3769 ((ARMOperand*)Operands[3])->getReg()) 3770 RegOp = 5; 3771 ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1); 3772 Inst.addOperand(Inst.getOperand(0)); 3773 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3774 3775 return true; 3776} 3777 3778bool ARMAsmParser:: 3779cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 3780 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3781 // Vd 3782 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3783 // Create a writeback register dummy placeholder. 
3784 Inst.addOperand(MCOperand::CreateImm(0)); 3785 // Vn 3786 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3787 // pred 3788 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3789 return true; 3790} 3791 3792bool ARMAsmParser:: 3793cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 3794 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3795 // Vd 3796 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3797 // Create a writeback register dummy placeholder. 3798 Inst.addOperand(MCOperand::CreateImm(0)); 3799 // Vn 3800 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3801 // Vm 3802 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3803 // pred 3804 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3805 return true; 3806} 3807 3808bool ARMAsmParser:: 3809cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 3810 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3811 // Create a writeback register dummy placeholder. 3812 Inst.addOperand(MCOperand::CreateImm(0)); 3813 // Vn 3814 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3815 // Vt 3816 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3817 // pred 3818 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3819 return true; 3820} 3821 3822bool ARMAsmParser:: 3823cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 3824 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3825 // Create a writeback register dummy placeholder. 3826 Inst.addOperand(MCOperand::CreateImm(0)); 3827 // Vn 3828 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3829 // Vm 3830 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3831 // Vt 3832 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3833 // pred 3834 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3835 return true; 3836} 3837 3838/// Parse an ARM memory expression, return false if successful else return true 3839/// or an error. 
/// The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // "[Rn]" — no offset at all.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier, e.g. "[Rn, :128]".
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // The alignment is specified in bits in the assembly, but stored in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    // Remember whether a '-' was seen so #-0 can be distinguished from #0.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// Returns false on success with St/Amount filled in; returns true (after
/// emitting a diagnostic where appropriate) on failure.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  // "asl" is accepted as a gas-compatible alias for "lsl".
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
      TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  // An integer token is taken as an already-encoded 8-bit FP immediate value.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse a arm instruction operand.  For now this parses the operand regardless
/// of the mnemonic.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If this is VMRS, check for the apsr_nzcv operand.
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall through for the Identifier case that is not a register or a
    // special name. (Intentional case fallthrough.)
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (CE) {
      int32_t Val = CE->getValue();
      // #-0 is represented as INT32_MIN to distinguish it from #0.
      if (isNegative && Val == 0)
        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    }
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}

// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
//  :lower16: and :upper16:.
4237bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 4238 RefKind = ARMMCExpr::VK_ARM_None; 4239 4240 // :lower16: and :upper16: modifiers 4241 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 4242 Parser.Lex(); // Eat ':' 4243 4244 if (getLexer().isNot(AsmToken::Identifier)) { 4245 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 4246 return true; 4247 } 4248 4249 StringRef IDVal = Parser.getTok().getIdentifier(); 4250 if (IDVal == "lower16") { 4251 RefKind = ARMMCExpr::VK_ARM_LO16; 4252 } else if (IDVal == "upper16") { 4253 RefKind = ARMMCExpr::VK_ARM_HI16; 4254 } else { 4255 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 4256 return true; 4257 } 4258 Parser.Lex(); 4259 4260 if (getLexer().isNot(AsmToken::Colon)) { 4261 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 4262 return true; 4263 } 4264 Parser.Lex(); // Eat the last ':' 4265 return false; 4266} 4267 4268/// \brief Given a mnemonic, split out possible predication code and carry 4269/// setting letters to form a canonical mnemonic and flags. 4270// 4271// FIXME: Would be nice to autogen this. 4272// FIXME: This is a bit of a maze of special cases. 4273StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, 4274 unsigned &PredicationCode, 4275 bool &CarrySetting, 4276 unsigned &ProcessorIMod, 4277 StringRef &ITMask) { 4278 PredicationCode = ARMCC::AL; 4279 CarrySetting = false; 4280 ProcessorIMod = 0; 4281 4282 // Ignore some mnemonics we know aren't predicated forms. 4283 // 4284 // FIXME: Would be nice to autogen this. 
4285 if ((Mnemonic == "movs" && isThumb()) || 4286 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || 4287 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || 4288 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || 4289 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || 4290 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || 4291 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || 4292 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal") 4293 return Mnemonic; 4294 4295 // First, split out any predication code. Ignore mnemonics we know aren't 4296 // predicated but do have a carry-set and so weren't caught above. 4297 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && 4298 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && 4299 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && 4300 Mnemonic != "sbcs" && Mnemonic != "rscs") { 4301 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) 4302 .Case("eq", ARMCC::EQ) 4303 .Case("ne", ARMCC::NE) 4304 .Case("hs", ARMCC::HS) 4305 .Case("cs", ARMCC::HS) 4306 .Case("lo", ARMCC::LO) 4307 .Case("cc", ARMCC::LO) 4308 .Case("mi", ARMCC::MI) 4309 .Case("pl", ARMCC::PL) 4310 .Case("vs", ARMCC::VS) 4311 .Case("vc", ARMCC::VC) 4312 .Case("hi", ARMCC::HI) 4313 .Case("ls", ARMCC::LS) 4314 .Case("ge", ARMCC::GE) 4315 .Case("lt", ARMCC::LT) 4316 .Case("gt", ARMCC::GT) 4317 .Case("le", ARMCC::LE) 4318 .Case("al", ARMCC::AL) 4319 .Default(~0U); 4320 if (CC != ~0U) { 4321 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); 4322 PredicationCode = CC; 4323 } 4324 } 4325 4326 // Next, determine if we have a carry setting bit. We explicitly ignore all 4327 // the instructions we know end in 's'. 
4328 if (Mnemonic.endswith("s") && 4329 !(Mnemonic == "cps" || Mnemonic == "mls" || 4330 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || 4331 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || 4332 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || 4333 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" || 4334 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" || 4335 Mnemonic == "fsts" || 4336 (Mnemonic == "movs" && isThumb()))) { 4337 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); 4338 CarrySetting = true; 4339 } 4340 4341 // The "cps" instruction can have a interrupt mode operand which is glued into 4342 // the mnemonic. Check if this is the case, split it and parse the imod op 4343 if (Mnemonic.startswith("cps")) { 4344 // Split out any imod code. 4345 unsigned IMod = 4346 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 4347 .Case("ie", ARM_PROC::IE) 4348 .Case("id", ARM_PROC::ID) 4349 .Default(~0U); 4350 if (IMod != ~0U) { 4351 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 4352 ProcessorIMod = IMod; 4353 } 4354 } 4355 4356 // The "it" instruction has the condition mask on the end of the mnemonic. 4357 if (Mnemonic.startswith("it")) { 4358 ITMask = Mnemonic.slice(2, Mnemonic.size()); 4359 Mnemonic = Mnemonic.slice(0, 2); 4360 } 4361 4362 return Mnemonic; 4363} 4364 4365/// \brief Given a canonical mnemonic, determine if the instruction ever allows 4366/// inclusion of carry set or predication code operands. 4367// 4368// FIXME: It would be nice to autogen this. 
4369void ARMAsmParser:: 4370getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4371 bool &CanAcceptPredicationCode) { 4372 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4373 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4374 Mnemonic == "add" || Mnemonic == "adc" || 4375 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4376 Mnemonic == "orr" || Mnemonic == "mvn" || 4377 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4378 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4379 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4380 Mnemonic == "mla" || Mnemonic == "smlal" || 4381 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4382 CanAcceptCarrySet = true; 4383 } else 4384 CanAcceptCarrySet = false; 4385 4386 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4387 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4388 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4389 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4390 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4391 (Mnemonic == "clrex" && !isThumb()) || 4392 (Mnemonic == "nop" && isThumbOne()) || 4393 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4394 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4395 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4396 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4397 !isThumb()) || 4398 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4399 CanAcceptPredicationCode = false; 4400 } else 4401 CanAcceptPredicationCode = true; 4402 4403 if (isThumb()) { 4404 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4405 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4406 CanAcceptPredicationCode = false; 4407 } 4408} 4409 4410bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4411 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4412 // FIXME: This is all horribly hacky. We really need a better way to deal 4413 // with optional operands like this in the matcher table. 4414 4415 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4416 // another does not. Specifically, the MOVW instruction does not. So we 4417 // special case it here and remove the defaulted (non-setting) cc_out 4418 // operand if that's the instruction we're trying to match. 4419 // 4420 // We do this as post-processing of the explicit operands rather than just 4421 // conditionally adding the cc_out in the first place because we need 4422 // to check the type of the parsed immediate operand. 4423 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4424 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4425 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4426 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4427 return true; 4428 4429 // Register-register 'add' for thumb does not have a cc_out operand 4430 // when there are only two register operands. 4431 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4432 static_cast<ARMOperand*>(Operands[3])->isReg() && 4433 static_cast<ARMOperand*>(Operands[4])->isReg() && 4434 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4435 return true; 4436 // Register-register 'add' for thumb does not have a cc_out operand 4437 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4438 // have to check the immediate range here since Thumb2 has a variant 4439 // that can handle a different range and has a cc_out operand. 
4440 if (((isThumb() && Mnemonic == "add") || 4441 (isThumbTwo() && Mnemonic == "sub")) && 4442 Operands.size() == 6 && 4443 static_cast<ARMOperand*>(Operands[3])->isReg() && 4444 static_cast<ARMOperand*>(Operands[4])->isReg() && 4445 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4446 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4447 (static_cast<ARMOperand*>(Operands[5])->isReg() || 4448 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4449 return true; 4450 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4451 // imm0_4095 variant. That's the least-preferred variant when 4452 // selecting via the generic "add" mnemonic, so to know that we 4453 // should remove the cc_out operand, we have to explicitly check that 4454 // it's not one of the other variants. Ugh. 4455 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4456 Operands.size() == 6 && 4457 static_cast<ARMOperand*>(Operands[3])->isReg() && 4458 static_cast<ARMOperand*>(Operands[4])->isReg() && 4459 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4460 // Nest conditions rather than one big 'if' statement for readability. 4461 // 4462 // If either register is a high reg, it's either one of the SP 4463 // variants (handled above) or a 32-bit encoding, so we just 4464 // check against T3. 4465 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4466 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4467 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4468 return false; 4469 // If both registers are low, we're in an IT block, and the immediate is 4470 // in range, we should use encoding T1 instead, which has a cc_out. 
4471 if (inITBlock() && 4472 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4473 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4474 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4475 return false; 4476 4477 // Otherwise, we use encoding T4, which does not have a cc_out 4478 // operand. 4479 return true; 4480 } 4481 4482 // The thumb2 multiply instruction doesn't have a CCOut register, so 4483 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4484 // use the 16-bit encoding or not. 4485 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4486 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4487 static_cast<ARMOperand*>(Operands[3])->isReg() && 4488 static_cast<ARMOperand*>(Operands[4])->isReg() && 4489 static_cast<ARMOperand*>(Operands[5])->isReg() && 4490 // If the registers aren't low regs, the destination reg isn't the 4491 // same as one of the source regs, or the cc_out operand is zero 4492 // outside of an IT block, we have to use the 32-bit encoding, so 4493 // remove the cc_out operand. 4494 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4495 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4496 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) || 4497 !inITBlock() || 4498 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4499 static_cast<ARMOperand*>(Operands[5])->getReg() && 4500 static_cast<ARMOperand*>(Operands[3])->getReg() != 4501 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4502 return true; 4503 4504 // Also check the 'mul' syntax variant that doesn't specify an explicit 4505 // destination register. 
4506 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && 4507 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4508 static_cast<ARMOperand*>(Operands[3])->isReg() && 4509 static_cast<ARMOperand*>(Operands[4])->isReg() && 4510 // If the registers aren't low regs or the cc_out operand is zero 4511 // outside of an IT block, we have to use the 32-bit encoding, so 4512 // remove the cc_out operand. 4513 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4514 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4515 !inITBlock())) 4516 return true; 4517 4518 4519 4520 // Register-register 'add/sub' for thumb does not have a cc_out operand 4521 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4522 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4523 // right, this will result in better diagnostics (which operand is off) 4524 // anyway. 4525 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4526 (Operands.size() == 5 || Operands.size() == 6) && 4527 static_cast<ARMOperand*>(Operands[3])->isReg() && 4528 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4529 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4530 return true; 4531 4532 return false; 4533} 4534 4535static bool isDataTypeToken(StringRef Tok) { 4536 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || 4537 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || 4538 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || 4539 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || 4540 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || 4541 Tok == ".f" || Tok == ".d"; 4542} 4543 4544// FIXME: This bit should probably be handled via an explicit match class 4545// in the .td files that matches the suffix instead of having it be 4546// a literal string token the way it is now. 
// VLDM/VSTM accept an optional data type suffix purely for readability;
// it does not affect the encoding, so it is simply discarded.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  unsigned AvailableFeatures = getAvailableFeatures();
  applyMnemonicAliases(Name, AvailableFeatures);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Scan the mask right-to-left, shifting in a '1' for 't' and '0' for
    // 'e', keeping the marker bit (8) above the shifted-in conditions.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // The ".n" (narrow) width specifier is dropped; everything else is kept
    // as a literal token for the matcher.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here. Take care not to convert those
  // that should hit the Thumb2 encoding.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0 &&
        (isThumbOne() ||
         // The cc_out operand matches the IT block.
         ((inITBlock() != CarrySetting) &&
         // Neither register operand is a high register.
         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern const MCInstrDesc ARMInsts[];
}
// Look up the instruction descriptor for an opcode directly in the
// tablegen'erated ARMInsts array.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// Validate context-sensitive operand constraints that the table-driven
// matcher cannot express (IT-block state, sequential register pairs,
// Thumb register-list restrictions, ...). Returns true and emits a
// diagnostic on failure.
// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1 (source registers start at operand 1 here because
    // of the writeback result operand).
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

// Map a VST1 lane "Asm" pseudo-opcode (one per data-type spelling) to the
// real instruction opcode for its element size and writeback form.
static unsigned getRealVSTLNOpcode(unsigned Opc) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  case ARM::VST1LNdWB_fixed_Asm_8:   return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_P8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_I8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_S8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_U8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_F:   return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:   return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_P8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_I8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_S8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_U8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_F:   return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:   return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_P8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_I8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_S8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_U8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_F:   return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32;
  }
}

// Map a VLD1 lane "Asm" pseudo-opcode (one per data-type spelling) to the
// real instruction opcode for its element size and writeback form.
static unsigned getRealVLDLNOpcode(unsigned Opc) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  case ARM::VLD1LNdWB_fixed_Asm_8:   return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_P8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_I8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_S8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_U8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16:  return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32:  return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_F:   return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8:   return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_P8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_I8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_S8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_U8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16:  return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32:  return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_F:   return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8:   return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_P8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_I8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_S8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_U8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16:  return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32:  return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_F:   return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32;
  }
}

bool ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  // Handle NEON VST1 complex aliases.
  // VST1 lane, register-offset writeback: vst1.<ty> {Dd[lane]}, [Rn]!, Rm
  case ARM::VST1LNdWB_register_Asm_8:
  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8:
  case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
  case ARM::VST1LNdWB_register_Asm_16:
  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16:
  case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
  case ARM::VST1LNdWB_register_Asm_32:
  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32:
  case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32:
  case ARM::VST1LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // second half of the pred pair
    Inst = TmpInst;
    return true;
  }
  // VST1 lane, fixed-increment writeback: vst1.<ty> {Dd[lane]}, [Rn]!
  case ARM::VST1LNdWB_fixed_Asm_8:
  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8:
  case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
  case ARM::VST1LNdWB_fixed_Asm_16:
  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16:
  case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
  case ARM::VST1LNdWB_fixed_Asm_32:
  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32:
  case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32:
  case ARM::VST1LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm (0 == fixed increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // VST1 lane, no writeback: vst1.<ty> {Dd[lane]}, [Rn]
  case ARM::VST1LNdAsm_8:
  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8:
  case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
  case ARM::VST1LNdAsm_16:
  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16:
  case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
  case ARM::VST1LNdAsm_32:
  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32:
  case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32:
  case ARM::VST1LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // Handle NEON VLD1 complex aliases.
  // VLD1 lane, register-offset writeback: vld1.<ty> {Dd[lane]}, [Rn]!, Rm
  case ARM::VLD1LNdWB_register_Asm_8:
  case ARM::VLD1LNdWB_register_Asm_P8:
  case ARM::VLD1LNdWB_register_Asm_I8:
  case ARM::VLD1LNdWB_register_Asm_S8:
  case ARM::VLD1LNdWB_register_Asm_U8:
  case ARM::VLD1LNdWB_register_Asm_16:
  case ARM::VLD1LNdWB_register_Asm_P16:
  case ARM::VLD1LNdWB_register_Asm_I16:
  case ARM::VLD1LNdWB_register_Asm_S16:
  case ARM::VLD1LNdWB_register_Asm_U16:
  case ARM::VLD1LNdWB_register_Asm_32:
  case ARM::VLD1LNdWB_register_Asm_F:
  case ARM::VLD1LNdWB_register_Asm_F32:
  case ARM::VLD1LNdWB_register_Asm_I32:
  case ARM::VLD1LNdWB_register_Asm_S32:
  case ARM::VLD1LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }
  // VLD1 lane, fixed-increment writeback: vld1.<ty> {Dd[lane]}, [Rn]!
  case ARM::VLD1LNdWB_fixed_Asm_8:
  case ARM::VLD1LNdWB_fixed_Asm_P8:
  case ARM::VLD1LNdWB_fixed_Asm_I8:
  case ARM::VLD1LNdWB_fixed_Asm_S8:
  case ARM::VLD1LNdWB_fixed_Asm_U8:
  case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_P16:
  case ARM::VLD1LNdWB_fixed_Asm_I16:
  case ARM::VLD1LNdWB_fixed_Asm_S16:
  case ARM::VLD1LNdWB_fixed_Asm_U16:
  case ARM::VLD1LNdWB_fixed_Asm_32:
  case ARM::VLD1LNdWB_fixed_Asm_F:
  case ARM::VLD1LNdWB_fixed_Asm_F32:
  case ARM::VLD1LNdWB_fixed_Asm_I32:
  case ARM::VLD1LNdWB_fixed_Asm_S32:
  case ARM::VLD1LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm (0 == fixed increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // VLD1 lane, no writeback: vld1.<ty> {Dd[lane]}, [Rn]
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_P8:
  case ARM::VLD1LNdAsm_I8:
  case ARM::VLD1LNdAsm_S8:
  case ARM::VLD1LNdAsm_U8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_P16:
  case ARM::VLD1LNdAsm_I16:
  case ARM::VLD1LNdAsm_S16:
  case ARM::VLD1LNdAsm_U16:
  case ARM::VLD1LNdAsm_32:
  case ARM::VLD1LNdAsm_F:
  case ARM::VLD1LNdAsm_F32:
  case ARM::VLD1LNdAsm_I32:
  case ARM::VLD1LNdAsm_S32:
  case ARM::VLD1LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // Handle the Thumb2 mode MOV complex aliases.
  case ARM::t2MOVsi:
  case ARM::t2MOVSsi: {
    // Which instruction to expand to depends on the CCOut operand and
    // whether we're in an IT block if the register operands are low
    // registers.
    bool isNarrow = false;
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
      isNarrow = true;
    MCInst TmpInst;
    unsigned newOpc;
    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
    // There is no 16-bit encoding of ROR-immediate.
    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
    }
    // NOTE(review): local is spelled "Ammount" (sic); renaming is a
    // separate cleanup.
    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
    // An ASR/LSR shift amount of 32 is encoded as 0 in the immediate field.
    if (Ammount == 32) Ammount = 0;
    TmpInst.setOpcode(newOpc);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    // The narrow encodings place cc_out right after Rd; the wide ones
    // place it at the end (after the predicate).
    if (isNarrow)
      TmpInst.addOperand(MCOperand::CreateReg(
          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(Ammount));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    if (!isNarrow)
      TmpInst.addOperand(MCOperand::CreateReg(
          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
    Inst = TmpInst;
    return true;
  }
  // Handle the ARM mode MOV complex aliases.
  // Register-shift forms: asr/lsr/lsl/ror Rd, Rn, Rm -> mov Rd, Rn, <sh> Rm
  case ARM::ASRr:
  case ARM::LSRr:
  case ARM::LSLr:
  case ARM::RORr: {
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
    }
    // Register-shifted form always expands to MOVsr; the immediate field of
    // the shifter operand is unused here, so encode it as zero.
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsr);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // Rm
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  // Immediate-shift forms: asr/lsr/lsl/ror Rd, Rn, #imm
  case ARM::ASRi:
  case ARM::LSRi:
  case ARM::LSLi:
  case ARM::RORi: {
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
    }
    // A shift by zero is a plain MOVr, not a MOVsi.
    unsigned Amt = Inst.getOperand(2).getImm();
    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
    MCInst TmpInst;
    TmpInst.setOpcode(Opc);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    if (Opc == ARM::MOVsi)
      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  // rrx Rd, Rn -> mov Rd, Rn, rrx
  case ARM::RRXi: {
    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsi);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
    Inst = TmpInst;
    return true;
  }
  case ARM::t2LDMIA_UPD: {
    // If this is a load of a single register, then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDR_POST);
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(4));
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::t2STMDB_UPD: {
    // If this is a store of a single register, then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2STR_PRE);
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(-4));
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      // NOTE(review): unlike the LDMIA_UPD/'pop' case above this falls
      // through to 'break' (returns false) rather than 'return true';
      // confirm whether re-processing of the rewritten inst is intended.
    }
    break;
  case ARM::t2ADDri12:
    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
    // mnemonic was used (not "addw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2ADDri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::t2SUBri12:
    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
    // mnemonic was used (not "subw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2SUBri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tADDi3);
      return true;
    }
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tSUBi3);
      return true;
    }
    break;
  case ARM::t2ADDrr: {
    // If the destination and first source operand are the same, and
    // there's no setting of the flags, use encoding T2 instead of T3.
    // Note that this is only for ADD, not SUB. This mirrors the system
    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
        Inst.getOperand(5).getReg() != 0 ||
        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
      break;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::tADDhirr);
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(2));
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
      Inst.setOpcode(ARM::tBcc);
      return true;
    }
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
      Inst.setOpcode(ARM::t2Bcc);
      return true;
    }
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
      Inst.setOpcode(ARM::t2B);
      return true;
    }
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
      Inst.setOpcode(ARM::tB);
      return true;
    }
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    // tLDMIA requires: writeback iff the base register is NOT in the list.
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
      return true;
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
      return true;
    }
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2LDMIA_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2STMDB_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
          Inst.getOperand(4).getReg() == ARM::CPSR) ||
         (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == ARMCC::AL &&
        Inst.getOperand(4).getReg() == ARM::CPSR &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't the same for tMOV[S]r... (no cc_out)
      // NOTE(review): the guard above already requires operand(4) to be
      // CPSR, so this ternary always picks tMOVSr — confirm whether the
      // cc_out==0 (tMOVr) path was meant to be reachable.
      MCInst TmpInst;
      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2SXTH:
  case ARM::t2SXTB:
  case ARM::t2UXTH:
  case ARM::t2UXTB: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == 0 &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("Illegal opcode!");
      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
      }
      // The operands aren't the same for thumb1 (no rotate operand).
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2IT: {
    // The mask bits for all but the first condition are represented as
    // the low bit of the condition code value implies 't'. We currently
    // always have 1 implies 't', so XOR toggle the bits if the low bit
    // of the condition code is zero. The encoding also expects the low
    // bit of the condition to be encoded as bit 4 of the mask operand,
    // so mask that in if needed
    MCOperand &MO = Inst.getOperand(1);
    unsigned Mask = MO.getImm();
    unsigned OrigMask = Mask;
    unsigned TZ = CountTrailingZeros_32(Mask);
    if ((Inst.getOperand(0).getImm() & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      // Flip every mask bit above the trailing 1 (the terminator bit).
      for (unsigned i = 3; i != TZ; --i)
        Mask ^= 1 << i;
    } else
      Mask |= 0x10;
    MO.setImm(Mask);

    // Set up the IT block state according to the IT instruction we just
    // matched.
    assert(!inITBlock() && "nested IT blocks?!");
    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
    ITState.CurPosition = 0;
    ITState.FirstCond = true;
    break;
  }
  }
  // No transformation applied; tell the caller the loop can stop.
  return false;
}

unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
5707 unsigned Opc = Inst.getOpcode(); 5708 const MCInstrDesc &MCID = getInstDesc(Opc); 5709 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 5710 assert(MCID.hasOptionalDef() && 5711 "optionally flag setting instruction missing optional def operand"); 5712 assert(MCID.NumOperands == Inst.getNumOperands() && 5713 "operand count mismatch!"); 5714 // Find the optional-def operand (cc_out). 5715 unsigned OpNo; 5716 for (OpNo = 0; 5717 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 5718 ++OpNo) 5719 ; 5720 // If we're parsing Thumb1, reject it completely. 5721 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 5722 return Match_MnemonicFail; 5723 // If we're parsing Thumb2, which form is legal depends on whether we're 5724 // in an IT block. 5725 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 5726 !inITBlock()) 5727 return Match_RequiresITBlock; 5728 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 5729 inITBlock()) 5730 return Match_RequiresNotITBlock; 5731 } 5732 // Some high-register supporting Thumb1 encodings only allow both registers 5733 // to be from r0-r7 when in Thumb2. 5734 else if (Opc == ARM::tADDhirr && isThumbOne() && 5735 isARMLowRegister(Inst.getOperand(1).getReg()) && 5736 isARMLowRegister(Inst.getOperand(2).getReg())) 5737 return Match_RequiresThumb2; 5738 // Others only require ARMv6 or later. 
5739 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 5740 isARMLowRegister(Inst.getOperand(0).getReg()) && 5741 isARMLowRegister(Inst.getOperand(1).getReg())) 5742 return Match_RequiresV6; 5743 return Match_Success; 5744} 5745 5746bool ARMAsmParser:: 5747MatchAndEmitInstruction(SMLoc IDLoc, 5748 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 5749 MCStreamer &Out) { 5750 MCInst Inst; 5751 unsigned ErrorInfo; 5752 unsigned MatchResult; 5753 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 5754 switch (MatchResult) { 5755 default: break; 5756 case Match_Success: 5757 // Context sensitive operand constraints aren't handled by the matcher, 5758 // so check them here. 5759 if (validateInstruction(Inst, Operands)) { 5760 // Still progress the IT block, otherwise one wrong condition causes 5761 // nasty cascading errors. 5762 forwardITPosition(); 5763 return true; 5764 } 5765 5766 // Some instructions need post-processing to, for example, tweak which 5767 // encoding is selected. Loop on it while changes happen so the 5768 // individual transformations can chain off each other. E.g., 5769 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 5770 while (processInstruction(Inst, Operands)) 5771 ; 5772 5773 // Only move forward at the very end so that everything in validate 5774 // and process gets a consistent answer about whether we're in an IT 5775 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ErrorInfo, when valid, is the index of the offending parsed operand;
    // point the diagnostic at it when possible.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emited a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    return parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    return parseDirectiveUnreq(DirectiveID.getLoc());
  // Returning true means "not handled here"; the generic parser will
  // produce the diagnostic for unknown directives.
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  // Consume the EndOfStatement token.
  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  if (!isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  return false;
}

/// parseDirectiveARM
///  ::= .arm
bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  if (isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has function name after .thumb_func direction
  // ELF doesn't
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if
(Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 5897 return Error(L, "unexpected token in .thumb_func directive"); 5898 Name = Tok.getIdentifier(); 5899 Parser.Lex(); // Consume the identifier token. 5900 } 5901 5902 if (getLexer().isNot(AsmToken::EndOfStatement)) 5903 return Error(L, "unexpected token in directive"); 5904 Parser.Lex(); 5905 5906 // FIXME: assuming function name will be the line following .thumb_func 5907 if (!isMachO) { 5908 Name = Parser.getTok().getIdentifier(); 5909 } 5910 5911 // Mark symbol as a thumb symbol. 5912 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 5913 getParser().getStreamer().EmitThumbFunc(Func); 5914 return false; 5915} 5916 5917/// parseDirectiveSyntax 5918/// ::= .syntax unified | divided 5919bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 5920 const AsmToken &Tok = Parser.getTok(); 5921 if (Tok.isNot(AsmToken::Identifier)) 5922 return Error(L, "unexpected token in .syntax directive"); 5923 StringRef Mode = Tok.getString(); 5924 if (Mode == "unified" || Mode == "UNIFIED") 5925 Parser.Lex(); 5926 else if (Mode == "divided" || Mode == "DIVIDED") 5927 return Error(L, "'.syntax divided' arm asssembly not supported"); 5928 else 5929 return Error(L, "unrecognized syntax mode in .syntax directive"); 5930 5931 if (getLexer().isNot(AsmToken::EndOfStatement)) 5932 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5933 Parser.Lex(); 5934 5935 // TODO tell the MC streamer the mode 5936 // getParser().getStreamer().Emit???(); 5937 return false; 5938} 5939 5940/// parseDirectiveCode 5941/// ::= .code 16 | 32 5942bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 5943 const AsmToken &Tok = Parser.getTok(); 5944 if (Tok.isNot(AsmToken::Integer)) 5945 return Error(L, "unexpected token in .code directive"); 5946 int64_t Val = Parser.getTok().getIntVal(); 5947 if (Val == 16) 5948 Parser.Lex(); 5949 else if (Val == 32) 5950 Parser.Lex(); 5951 else 5952 return Error(L, "invalid operand 
to .code directive"); 5953 5954 if (getLexer().isNot(AsmToken::EndOfStatement)) 5955 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5956 Parser.Lex(); 5957 5958 if (Val == 16) { 5959 if (!isThumb()) 5960 SwitchMode(); 5961 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5962 } else { 5963 if (isThumb()) 5964 SwitchMode(); 5965 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5966 } 5967 5968 return false; 5969} 5970 5971/// parseDirectiveReq 5972/// ::= name .req registername 5973bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { 5974 Parser.Lex(); // Eat the '.req' token. 5975 unsigned Reg; 5976 SMLoc SRegLoc, ERegLoc; 5977 if (ParseRegister(Reg, SRegLoc, ERegLoc)) { 5978 Parser.EatToEndOfStatement(); 5979 return Error(SRegLoc, "register name expected"); 5980 } 5981 5982 // Shouldn't be anything else. 5983 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { 5984 Parser.EatToEndOfStatement(); 5985 return Error(Parser.getTok().getLoc(), 5986 "unexpected input in .req directive."); 5987 } 5988 5989 Parser.Lex(); // Consume the EndOfStatement 5990 5991 if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) 5992 return Error(SRegLoc, "redefinition of '" + Name + 5993 "' does not match original."); 5994 5995 return false; 5996} 5997 5998/// parseDirectiveUneq 5999/// ::= .unreq registername 6000bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { 6001 if (Parser.getTok().isNot(AsmToken::Identifier)) { 6002 Parser.EatToEndOfStatement(); 6003 return Error(L, "unexpected input in .unreq directive."); 6004 } 6005 RegisterReqs.erase(Parser.getTok().getIdentifier()); 6006 Parser.Lex(); // Eat the identifier. 6007 return false; 6008} 6009 6010extern "C" void LLVMInitializeARMAsmLexer(); 6011 6012/// Force static initialization. 
extern "C" void LLVMInitializeARMAsmParser() {
  // Register the asm parser with the target registry for both the ARM and
  // Thumb targets; a single ARMAsmParser class handles both modes.
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}

// Pull in the tablegen-generated register-name matcher and the
// instruction matcher implementation for this parser.
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"