ARMAsmParser.cpp revision 9b1b3902882675e5ce35eacd639456bd648324b7
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringSwitch.h" 34#include "llvm/ADT/Twine.h" 35 36using namespace llvm; 37 38namespace { 39 40class ARMOperand; 41 42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane }; 43 44class ARMAsmParser : public MCTargetAsmParser { 45 MCSubtargetInfo &STI; 46 MCAsmParser &Parser; 47 48 // Map of register aliases registers via the .req directive. 49 StringMap<unsigned> RegisterReqs; 50 51 struct { 52 ARMCC::CondCodes Cond; // Condition for IT block. 53 unsigned Mask:4; // Condition mask for instructions. 54 // Starting at first 1 (from lsb). 55 // '1' condition as indicated in IT. 56 // '0' inverse of condition (else). 
57 // Count of instructions in IT block is 58 // 4 - trailingzeroes(mask) 59 60 bool FirstCond; // Explicit flag for when we're parsing the 61 // First instruction in the IT block. It's 62 // implied in the mask, so needs special 63 // handling. 64 65 unsigned CurPosition; // Current position in parsing of IT 66 // block. In range [0,3]. Initialized 67 // according to count of instructions in block. 68 // ~0U if no active IT block. 69 } ITState; 70 bool inITBlock() { return ITState.CurPosition != ~0U;} 71 void forwardITPosition() { 72 if (!inITBlock()) return; 73 // Move to the next instruction in the IT block, if there is one. If not, 74 // mark the block as done. 75 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 76 if (++ITState.CurPosition == 5 - TZ) 77 ITState.CurPosition = ~0U; // Done with the IT block after this. 78 } 79 80 81 MCAsmParser &getParser() const { return Parser; } 82 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 83 84 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 85 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 86 87 int tryParseRegister(); 88 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 89 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 90 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 91 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 92 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 93 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 94 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 95 unsigned &ShiftAmount); 96 bool parseDirectiveWord(unsigned Size, SMLoc L); 97 bool parseDirectiveThumb(SMLoc L); 98 bool parseDirectiveARM(SMLoc L); 99 bool parseDirectiveThumbFunc(SMLoc L); 100 bool parseDirectiveCode(SMLoc L); 101 bool parseDirectiveSyntax(SMLoc L); 102 bool parseDirectiveReq(StringRef Name, SMLoc L); 103 bool parseDirectiveUnreq(SMLoc L); 104 105 StringRef 
splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 106 bool &CarrySetting, unsigned &ProcessorIMod, 107 StringRef &ITMask); 108 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 109 bool &CanAcceptPredicationCode); 110 111 bool isThumb() const { 112 // FIXME: Can tablegen auto-generate this? 113 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 114 } 115 bool isThumbOne() const { 116 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 117 } 118 bool isThumbTwo() const { 119 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 120 } 121 bool hasV6Ops() const { 122 return STI.getFeatureBits() & ARM::HasV6Ops; 123 } 124 bool hasV7Ops() const { 125 return STI.getFeatureBits() & ARM::HasV7Ops; 126 } 127 void SwitchMode() { 128 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 129 setAvailableFeatures(FB); 130 } 131 bool isMClass() const { 132 return STI.getFeatureBits() & ARM::FeatureMClass; 133 } 134 135 /// @name Auto-generated Match Functions 136 /// { 137 138#define GET_ASSEMBLER_HEADER 139#include "ARMGenAsmMatcher.inc" 140 141 /// } 142 143 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 144 OperandMatchResultTy parseCoprocNumOperand( 145 SmallVectorImpl<MCParsedAsmOperand*>&); 146 OperandMatchResultTy parseCoprocRegOperand( 147 SmallVectorImpl<MCParsedAsmOperand*>&); 148 OperandMatchResultTy parseCoprocOptionOperand( 149 SmallVectorImpl<MCParsedAsmOperand*>&); 150 OperandMatchResultTy parseMemBarrierOptOperand( 151 SmallVectorImpl<MCParsedAsmOperand*>&); 152 OperandMatchResultTy parseProcIFlagsOperand( 153 SmallVectorImpl<MCParsedAsmOperand*>&); 154 OperandMatchResultTy parseMSRMaskOperand( 155 SmallVectorImpl<MCParsedAsmOperand*>&); 156 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 157 StringRef Op, int Low, int High); 158 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 159 return 
parsePKHImm(O, "lsl", 0, 31); 160 } 161 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 162 return parsePKHImm(O, "asr", 1, 32); 163 } 164 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 165 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 166 OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 167 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 168 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 169 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 170 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 171 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 172 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index); 173 174 // Asm Match Converter Methods 175 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 176 const SmallVectorImpl<MCParsedAsmOperand*> &); 177 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 178 const SmallVectorImpl<MCParsedAsmOperand*> &); 179 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 180 const SmallVectorImpl<MCParsedAsmOperand*> &); 181 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 182 const SmallVectorImpl<MCParsedAsmOperand*> &); 183 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 184 const SmallVectorImpl<MCParsedAsmOperand*> &); 185 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 186 const SmallVectorImpl<MCParsedAsmOperand*> &); 187 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 188 const SmallVectorImpl<MCParsedAsmOperand*> &); 189 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 190 const SmallVectorImpl<MCParsedAsmOperand*> &); 191 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 192 const SmallVectorImpl<MCParsedAsmOperand*> &); 193 bool 
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 194 const SmallVectorImpl<MCParsedAsmOperand*> &); 195 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 196 const SmallVectorImpl<MCParsedAsmOperand*> &); 197 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 198 const SmallVectorImpl<MCParsedAsmOperand*> &); 199 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 200 const SmallVectorImpl<MCParsedAsmOperand*> &); 201 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 202 const SmallVectorImpl<MCParsedAsmOperand*> &); 203 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 204 const SmallVectorImpl<MCParsedAsmOperand*> &); 205 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 206 const SmallVectorImpl<MCParsedAsmOperand*> &); 207 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 208 const SmallVectorImpl<MCParsedAsmOperand*> &); 209 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 210 const SmallVectorImpl<MCParsedAsmOperand*> &); 211 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 212 const SmallVectorImpl<MCParsedAsmOperand*> &); 213 bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 214 const SmallVectorImpl<MCParsedAsmOperand*> &); 215 bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 216 const SmallVectorImpl<MCParsedAsmOperand*> &); 217 218 bool validateInstruction(MCInst &Inst, 219 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 220 bool processInstruction(MCInst &Inst, 221 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 222 bool shouldOmitCCOutOperand(StringRef Mnemonic, 223 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 224 225public: 226 enum ARMMatchResultTy { 227 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 228 Match_RequiresNotITBlock, 229 Match_RequiresV6, 230 Match_RequiresThumb2 231 }; 232 233 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 234 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 235 MCAsmParserExtension::Initialize(_Parser); 236 237 // Initialize the set of available 
features. 238 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 239 240 // Not in an ITBlock to start with. 241 ITState.CurPosition = ~0U; 242 } 243 244 // Implementation of the MCTargetAsmParser interface: 245 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 246 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 247 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 248 bool ParseDirective(AsmToken DirectiveID); 249 250 unsigned checkTargetMatchPredicate(MCInst &Inst); 251 252 bool MatchAndEmitInstruction(SMLoc IDLoc, 253 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 254 MCStreamer &Out); 255}; 256} // end anonymous namespace 257 258namespace { 259 260/// ARMOperand - Instances of this class represent a parsed ARM machine 261/// instruction. 262class ARMOperand : public MCParsedAsmOperand { 263 enum KindTy { 264 k_CondCode, 265 k_CCOut, 266 k_ITCondMask, 267 k_CoprocNum, 268 k_CoprocReg, 269 k_CoprocOption, 270 k_Immediate, 271 k_FPImmediate, 272 k_MemBarrierOpt, 273 k_Memory, 274 k_PostIndexRegister, 275 k_MSRMask, 276 k_ProcIFlags, 277 k_VectorIndex, 278 k_Register, 279 k_RegisterList, 280 k_DPRRegisterList, 281 k_SPRRegisterList, 282 k_VectorList, 283 k_VectorListAllLanes, 284 k_VectorListIndexed, 285 k_ShiftedRegister, 286 k_ShiftedImmediate, 287 k_ShifterImmediate, 288 k_RotateImmediate, 289 k_BitfieldDescriptor, 290 k_Token 291 } Kind; 292 293 SMLoc StartLoc, EndLoc; 294 SmallVector<unsigned, 8> Registers; 295 296 union { 297 struct { 298 ARMCC::CondCodes Val; 299 } CC; 300 301 struct { 302 unsigned Val; 303 } Cop; 304 305 struct { 306 unsigned Val; 307 } CoprocOption; 308 309 struct { 310 unsigned Mask:4; 311 } ITMask; 312 313 struct { 314 ARM_MB::MemBOpt Val; 315 } MBOpt; 316 317 struct { 318 ARM_PROC::IFlags Val; 319 } IFlags; 320 321 struct { 322 unsigned Val; 323 } MMask; 324 325 struct { 326 const char *Data; 327 unsigned Length; 328 } Tok; 329 330 struct { 331 unsigned RegNum; 332 } Reg; 333 334 // A vector 
register list is a sequential list of 1 to 4 registers. 335 struct { 336 unsigned RegNum; 337 unsigned Count; 338 unsigned LaneIndex; 339 } VectorList; 340 341 struct { 342 unsigned Val; 343 } VectorIndex; 344 345 struct { 346 const MCExpr *Val; 347 } Imm; 348 349 struct { 350 unsigned Val; // encoded 8-bit representation 351 } FPImm; 352 353 /// Combined record for all forms of ARM address expressions. 354 struct { 355 unsigned BaseRegNum; 356 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 357 // was specified. 358 const MCConstantExpr *OffsetImm; // Offset immediate value 359 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 360 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 361 unsigned ShiftImm; // shift for OffsetReg. 362 unsigned Alignment; // 0 = no alignment specified 363 // n = alignment in bytes (8, 16, or 32) 364 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 365 } Memory; 366 367 struct { 368 unsigned RegNum; 369 bool isAdd; 370 ARM_AM::ShiftOpc ShiftTy; 371 unsigned ShiftImm; 372 } PostIdxReg; 373 374 struct { 375 bool isASR; 376 unsigned Imm; 377 } ShifterImm; 378 struct { 379 ARM_AM::ShiftOpc ShiftTy; 380 unsigned SrcReg; 381 unsigned ShiftReg; 382 unsigned ShiftImm; 383 } RegShiftedReg; 384 struct { 385 ARM_AM::ShiftOpc ShiftTy; 386 unsigned SrcReg; 387 unsigned ShiftImm; 388 } RegShiftedImm; 389 struct { 390 unsigned Imm; 391 } RotImm; 392 struct { 393 unsigned LSB; 394 unsigned Width; 395 } Bitfield; 396 }; 397 398 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 399public: 400 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 401 Kind = o.Kind; 402 StartLoc = o.StartLoc; 403 EndLoc = o.EndLoc; 404 switch (Kind) { 405 case k_CondCode: 406 CC = o.CC; 407 break; 408 case k_ITCondMask: 409 ITMask = o.ITMask; 410 break; 411 case k_Token: 412 Tok = o.Tok; 413 break; 414 case k_CCOut: 415 case k_Register: 416 Reg = o.Reg; 417 break; 418 case k_RegisterList: 419 case 
k_DPRRegisterList: 420 case k_SPRRegisterList: 421 Registers = o.Registers; 422 break; 423 case k_VectorList: 424 case k_VectorListAllLanes: 425 case k_VectorListIndexed: 426 VectorList = o.VectorList; 427 break; 428 case k_CoprocNum: 429 case k_CoprocReg: 430 Cop = o.Cop; 431 break; 432 case k_CoprocOption: 433 CoprocOption = o.CoprocOption; 434 break; 435 case k_Immediate: 436 Imm = o.Imm; 437 break; 438 case k_FPImmediate: 439 FPImm = o.FPImm; 440 break; 441 case k_MemBarrierOpt: 442 MBOpt = o.MBOpt; 443 break; 444 case k_Memory: 445 Memory = o.Memory; 446 break; 447 case k_PostIndexRegister: 448 PostIdxReg = o.PostIdxReg; 449 break; 450 case k_MSRMask: 451 MMask = o.MMask; 452 break; 453 case k_ProcIFlags: 454 IFlags = o.IFlags; 455 break; 456 case k_ShifterImmediate: 457 ShifterImm = o.ShifterImm; 458 break; 459 case k_ShiftedRegister: 460 RegShiftedReg = o.RegShiftedReg; 461 break; 462 case k_ShiftedImmediate: 463 RegShiftedImm = o.RegShiftedImm; 464 break; 465 case k_RotateImmediate: 466 RotImm = o.RotImm; 467 break; 468 case k_BitfieldDescriptor: 469 Bitfield = o.Bitfield; 470 break; 471 case k_VectorIndex: 472 VectorIndex = o.VectorIndex; 473 break; 474 } 475 } 476 477 /// getStartLoc - Get the location of the first token of this operand. 478 SMLoc getStartLoc() const { return StartLoc; } 479 /// getEndLoc - Get the location of the last token of this operand. 
480 SMLoc getEndLoc() const { return EndLoc; } 481 482 ARMCC::CondCodes getCondCode() const { 483 assert(Kind == k_CondCode && "Invalid access!"); 484 return CC.Val; 485 } 486 487 unsigned getCoproc() const { 488 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 489 return Cop.Val; 490 } 491 492 StringRef getToken() const { 493 assert(Kind == k_Token && "Invalid access!"); 494 return StringRef(Tok.Data, Tok.Length); 495 } 496 497 unsigned getReg() const { 498 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 499 return Reg.RegNum; 500 } 501 502 const SmallVectorImpl<unsigned> &getRegList() const { 503 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 504 Kind == k_SPRRegisterList) && "Invalid access!"); 505 return Registers; 506 } 507 508 const MCExpr *getImm() const { 509 assert(Kind == k_Immediate && "Invalid access!"); 510 return Imm.Val; 511 } 512 513 unsigned getFPImm() const { 514 assert(Kind == k_FPImmediate && "Invalid access!"); 515 return FPImm.Val; 516 } 517 518 unsigned getVectorIndex() const { 519 assert(Kind == k_VectorIndex && "Invalid access!"); 520 return VectorIndex.Val; 521 } 522 523 ARM_MB::MemBOpt getMemBarrierOpt() const { 524 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 525 return MBOpt.Val; 526 } 527 528 ARM_PROC::IFlags getProcIFlags() const { 529 assert(Kind == k_ProcIFlags && "Invalid access!"); 530 return IFlags.Val; 531 } 532 533 unsigned getMSRMask() const { 534 assert(Kind == k_MSRMask && "Invalid access!"); 535 return MMask.Val; 536 } 537 538 bool isCoprocNum() const { return Kind == k_CoprocNum; } 539 bool isCoprocReg() const { return Kind == k_CoprocReg; } 540 bool isCoprocOption() const { return Kind == k_CoprocOption; } 541 bool isCondCode() const { return Kind == k_CondCode; } 542 bool isCCOut() const { return Kind == k_CCOut; } 543 bool isITMask() const { return Kind == k_ITCondMask; } 544 bool isITCondCode() const { return Kind == k_CondCode; } 545 bool isImm() 
const { return Kind == k_Immediate; } 546 bool isFPImm() const { return Kind == k_FPImmediate; } 547 bool isImm8s4() const { 548 if (Kind != k_Immediate) 549 return false; 550 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 551 if (!CE) return false; 552 int64_t Value = CE->getValue(); 553 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 554 } 555 bool isImm0_1020s4() const { 556 if (Kind != k_Immediate) 557 return false; 558 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 559 if (!CE) return false; 560 int64_t Value = CE->getValue(); 561 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 562 } 563 bool isImm0_508s4() const { 564 if (Kind != k_Immediate) 565 return false; 566 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 567 if (!CE) return false; 568 int64_t Value = CE->getValue(); 569 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 570 } 571 bool isImm0_255() const { 572 if (Kind != k_Immediate) 573 return false; 574 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 575 if (!CE) return false; 576 int64_t Value = CE->getValue(); 577 return Value >= 0 && Value < 256; 578 } 579 bool isImm0_1() const { 580 if (Kind != k_Immediate) 581 return false; 582 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 583 if (!CE) return false; 584 int64_t Value = CE->getValue(); 585 return Value >= 0 && Value < 2; 586 } 587 bool isImm0_3() const { 588 if (Kind != k_Immediate) 589 return false; 590 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 591 if (!CE) return false; 592 int64_t Value = CE->getValue(); 593 return Value >= 0 && Value < 4; 594 } 595 bool isImm0_7() const { 596 if (Kind != k_Immediate) 597 return false; 598 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 599 if (!CE) return false; 600 int64_t Value = CE->getValue(); 601 return Value >= 0 && Value < 8; 602 } 603 bool isImm0_15() const { 604 if (Kind != k_Immediate) 605 return false; 606 
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 607 if (!CE) return false; 608 int64_t Value = CE->getValue(); 609 return Value >= 0 && Value < 16; 610 } 611 bool isImm0_31() const { 612 if (Kind != k_Immediate) 613 return false; 614 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 615 if (!CE) return false; 616 int64_t Value = CE->getValue(); 617 return Value >= 0 && Value < 32; 618 } 619 bool isImm0_63() const { 620 if (Kind != k_Immediate) 621 return false; 622 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 623 if (!CE) return false; 624 int64_t Value = CE->getValue(); 625 return Value >= 0 && Value < 64; 626 } 627 bool isImm8() const { 628 if (Kind != k_Immediate) 629 return false; 630 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 631 if (!CE) return false; 632 int64_t Value = CE->getValue(); 633 return Value == 8; 634 } 635 bool isImm16() const { 636 if (Kind != k_Immediate) 637 return false; 638 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 639 if (!CE) return false; 640 int64_t Value = CE->getValue(); 641 return Value == 16; 642 } 643 bool isImm32() const { 644 if (Kind != k_Immediate) 645 return false; 646 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 647 if (!CE) return false; 648 int64_t Value = CE->getValue(); 649 return Value == 32; 650 } 651 bool isShrImm8() const { 652 if (Kind != k_Immediate) 653 return false; 654 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 655 if (!CE) return false; 656 int64_t Value = CE->getValue(); 657 return Value > 0 && Value <= 8; 658 } 659 bool isShrImm16() const { 660 if (Kind != k_Immediate) 661 return false; 662 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 663 if (!CE) return false; 664 int64_t Value = CE->getValue(); 665 return Value > 0 && Value <= 16; 666 } 667 bool isShrImm32() const { 668 if (Kind != k_Immediate) 669 return false; 670 const MCConstantExpr *CE = 
dyn_cast<MCConstantExpr>(getImm()); 671 if (!CE) return false; 672 int64_t Value = CE->getValue(); 673 return Value > 0 && Value <= 32; 674 } 675 bool isShrImm64() const { 676 if (Kind != k_Immediate) 677 return false; 678 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 679 if (!CE) return false; 680 int64_t Value = CE->getValue(); 681 return Value > 0 && Value <= 64; 682 } 683 bool isImm1_7() const { 684 if (Kind != k_Immediate) 685 return false; 686 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 687 if (!CE) return false; 688 int64_t Value = CE->getValue(); 689 return Value > 0 && Value < 8; 690 } 691 bool isImm1_15() const { 692 if (Kind != k_Immediate) 693 return false; 694 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 695 if (!CE) return false; 696 int64_t Value = CE->getValue(); 697 return Value > 0 && Value < 16; 698 } 699 bool isImm1_31() const { 700 if (Kind != k_Immediate) 701 return false; 702 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 703 if (!CE) return false; 704 int64_t Value = CE->getValue(); 705 return Value > 0 && Value < 32; 706 } 707 bool isImm1_16() const { 708 if (Kind != k_Immediate) 709 return false; 710 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 711 if (!CE) return false; 712 int64_t Value = CE->getValue(); 713 return Value > 0 && Value < 17; 714 } 715 bool isImm1_32() const { 716 if (Kind != k_Immediate) 717 return false; 718 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 719 if (!CE) return false; 720 int64_t Value = CE->getValue(); 721 return Value > 0 && Value < 33; 722 } 723 bool isImm0_32() const { 724 if (Kind != k_Immediate) 725 return false; 726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 727 if (!CE) return false; 728 int64_t Value = CE->getValue(); 729 return Value >= 0 && Value < 33; 730 } 731 bool isImm0_65535() const { 732 if (Kind != k_Immediate) 733 return false; 734 const MCConstantExpr *CE = 
dyn_cast<MCConstantExpr>(getImm()); 735 if (!CE) return false; 736 int64_t Value = CE->getValue(); 737 return Value >= 0 && Value < 65536; 738 } 739 bool isImm0_65535Expr() const { 740 if (Kind != k_Immediate) 741 return false; 742 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 743 // If it's not a constant expression, it'll generate a fixup and be 744 // handled later. 745 if (!CE) return true; 746 int64_t Value = CE->getValue(); 747 return Value >= 0 && Value < 65536; 748 } 749 bool isImm24bit() const { 750 if (Kind != k_Immediate) 751 return false; 752 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 753 if (!CE) return false; 754 int64_t Value = CE->getValue(); 755 return Value >= 0 && Value <= 0xffffff; 756 } 757 bool isImmThumbSR() const { 758 if (Kind != k_Immediate) 759 return false; 760 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 761 if (!CE) return false; 762 int64_t Value = CE->getValue(); 763 return Value > 0 && Value < 33; 764 } 765 bool isPKHLSLImm() const { 766 if (Kind != k_Immediate) 767 return false; 768 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 769 if (!CE) return false; 770 int64_t Value = CE->getValue(); 771 return Value >= 0 && Value < 32; 772 } 773 bool isPKHASRImm() const { 774 if (Kind != k_Immediate) 775 return false; 776 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 777 if (!CE) return false; 778 int64_t Value = CE->getValue(); 779 return Value > 0 && Value <= 32; 780 } 781 bool isARMSOImm() const { 782 if (Kind != k_Immediate) 783 return false; 784 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 785 if (!CE) return false; 786 int64_t Value = CE->getValue(); 787 return ARM_AM::getSOImmVal(Value) != -1; 788 } 789 bool isARMSOImmNot() const { 790 if (Kind != k_Immediate) 791 return false; 792 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 793 if (!CE) return false; 794 int64_t Value = CE->getValue(); 795 return 
ARM_AM::getSOImmVal(~Value) != -1; 796 } 797 bool isARMSOImmNeg() const { 798 if (Kind != k_Immediate) 799 return false; 800 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 801 if (!CE) return false; 802 int64_t Value = CE->getValue(); 803 return ARM_AM::getSOImmVal(-Value) != -1; 804 } 805 bool isT2SOImm() const { 806 if (Kind != k_Immediate) 807 return false; 808 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 809 if (!CE) return false; 810 int64_t Value = CE->getValue(); 811 return ARM_AM::getT2SOImmVal(Value) != -1; 812 } 813 bool isT2SOImmNot() const { 814 if (Kind != k_Immediate) 815 return false; 816 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 817 if (!CE) return false; 818 int64_t Value = CE->getValue(); 819 return ARM_AM::getT2SOImmVal(~Value) != -1; 820 } 821 bool isT2SOImmNeg() const { 822 if (Kind != k_Immediate) 823 return false; 824 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 825 if (!CE) return false; 826 int64_t Value = CE->getValue(); 827 return ARM_AM::getT2SOImmVal(-Value) != -1; 828 } 829 bool isSetEndImm() const { 830 if (Kind != k_Immediate) 831 return false; 832 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 833 if (!CE) return false; 834 int64_t Value = CE->getValue(); 835 return Value == 1 || Value == 0; 836 } 837 bool isReg() const { return Kind == k_Register; } 838 bool isRegList() const { return Kind == k_RegisterList; } 839 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 840 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 841 bool isToken() const { return Kind == k_Token; } 842 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 843 bool isMemory() const { return Kind == k_Memory; } 844 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 845 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 846 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 847 bool isRotImm() 
const { return Kind == k_RotateImmediate; } 848 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 849 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 850 bool isPostIdxReg() const { 851 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 852 } 853 bool isMemNoOffset(bool alignOK = false) const { 854 if (!isMemory()) 855 return false; 856 // No offset of any kind. 857 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 858 (alignOK || Memory.Alignment == 0); 859 } 860 bool isAlignedMemory() const { 861 return isMemNoOffset(true); 862 } 863 bool isAddrMode2() const { 864 if (!isMemory() || Memory.Alignment != 0) return false; 865 // Check for register offset. 866 if (Memory.OffsetRegNum) return true; 867 // Immediate offset in range [-4095, 4095]. 868 if (!Memory.OffsetImm) return true; 869 int64_t Val = Memory.OffsetImm->getValue(); 870 return Val > -4096 && Val < 4096; 871 } 872 bool isAM2OffsetImm() const { 873 if (Kind != k_Immediate) 874 return false; 875 // Immediate offset in range [-4095, 4095]. 876 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 877 if (!CE) return false; 878 int64_t Val = CE->getValue(); 879 return Val > -4096 && Val < 4096; 880 } 881 bool isAddrMode3() const { 882 if (!isMemory() || Memory.Alignment != 0) return false; 883 // No shifts are legal for AM3. 884 if (Memory.ShiftType != ARM_AM::no_shift) return false; 885 // Check for register offset. 886 if (Memory.OffsetRegNum) return true; 887 // Immediate offset in range [-255, 255]. 888 if (!Memory.OffsetImm) return true; 889 int64_t Val = Memory.OffsetImm->getValue(); 890 return Val > -256 && Val < 256; 891 } 892 bool isAM3Offset() const { 893 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 894 return false; 895 if (Kind == k_PostIndexRegister) 896 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 897 // Immediate offset in range [-255, 255]. 
898 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 899 if (!CE) return false; 900 int64_t Val = CE->getValue(); 901 // Special case, #-0 is INT32_MIN. 902 return (Val > -256 && Val < 256) || Val == INT32_MIN; 903 } 904 bool isAddrMode5() const { 905 // If we have an immediate that's not a constant, treat it as a label 906 // reference needing a fixup. If it is a constant, it's something else 907 // and we reject it. 908 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 909 return true; 910 if (!isMemory() || Memory.Alignment != 0) return false; 911 // Check for register offset. 912 if (Memory.OffsetRegNum) return false; 913 // Immediate offset in range [-1020, 1020] and a multiple of 4. 914 if (!Memory.OffsetImm) return true; 915 int64_t Val = Memory.OffsetImm->getValue(); 916 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 917 Val == INT32_MIN; 918 } 919 bool isMemTBB() const { 920 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 921 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 922 return false; 923 return true; 924 } 925 bool isMemTBH() const { 926 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 927 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 928 Memory.Alignment != 0 ) 929 return false; 930 return true; 931 } 932 bool isMemRegOffset() const { 933 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 934 return false; 935 return true; 936 } 937 bool isT2MemRegOffset() const { 938 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 939 Memory.Alignment != 0) 940 return false; 941 // Only lsl #{0, 1, 2, 3} allowed. 942 if (Memory.ShiftType == ARM_AM::no_shift) 943 return true; 944 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 945 return false; 946 return true; 947 } 948 bool isMemThumbRR() const { 949 // Thumb reg+reg addressing is simple. Just two registers, a base and 950 // an offset. 
No shifts, negations or any other complicating factors. 951 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 952 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 953 return false; 954 return isARMLowRegister(Memory.BaseRegNum) && 955 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 956 } 957 bool isMemThumbRIs4() const { 958 if (!isMemory() || Memory.OffsetRegNum != 0 || 959 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 960 return false; 961 // Immediate offset, multiple of 4 in range [0, 124]. 962 if (!Memory.OffsetImm) return true; 963 int64_t Val = Memory.OffsetImm->getValue(); 964 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 965 } 966 bool isMemThumbRIs2() const { 967 if (!isMemory() || Memory.OffsetRegNum != 0 || 968 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 969 return false; 970 // Immediate offset, multiple of 4 in range [0, 62]. 971 if (!Memory.OffsetImm) return true; 972 int64_t Val = Memory.OffsetImm->getValue(); 973 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 974 } 975 bool isMemThumbRIs1() const { 976 if (!isMemory() || Memory.OffsetRegNum != 0 || 977 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 978 return false; 979 // Immediate offset in range [0, 31]. 980 if (!Memory.OffsetImm) return true; 981 int64_t Val = Memory.OffsetImm->getValue(); 982 return Val >= 0 && Val <= 31; 983 } 984 bool isMemThumbSPI() const { 985 if (!isMemory() || Memory.OffsetRegNum != 0 || 986 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 987 return false; 988 // Immediate offset, multiple of 4 in range [0, 1020]. 989 if (!Memory.OffsetImm) return true; 990 int64_t Val = Memory.OffsetImm->getValue(); 991 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 992 } 993 bool isMemImm8s4Offset() const { 994 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 995 return false; 996 // Immediate offset a multiple of 4 in range [-1020, 1020]. 
997 if (!Memory.OffsetImm) return true; 998 int64_t Val = Memory.OffsetImm->getValue(); 999 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 1000 } 1001 bool isMemImm0_1020s4Offset() const { 1002 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1003 return false; 1004 // Immediate offset a multiple of 4 in range [0, 1020]. 1005 if (!Memory.OffsetImm) return true; 1006 int64_t Val = Memory.OffsetImm->getValue(); 1007 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 1008 } 1009 bool isMemImm8Offset() const { 1010 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1011 return false; 1012 // Immediate offset in range [-255, 255]. 1013 if (!Memory.OffsetImm) return true; 1014 int64_t Val = Memory.OffsetImm->getValue(); 1015 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 1016 } 1017 bool isMemPosImm8Offset() const { 1018 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1019 return false; 1020 // Immediate offset in range [0, 255]. 1021 if (!Memory.OffsetImm) return true; 1022 int64_t Val = Memory.OffsetImm->getValue(); 1023 return Val >= 0 && Val < 256; 1024 } 1025 bool isMemNegImm8Offset() const { 1026 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1027 return false; 1028 // Immediate offset in range [-255, -1]. 1029 if (!Memory.OffsetImm) return false; 1030 int64_t Val = Memory.OffsetImm->getValue(); 1031 return (Val == INT32_MIN) || (Val > -256 && Val < 0); 1032 } 1033 bool isMemUImm12Offset() const { 1034 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1035 return false; 1036 // Immediate offset in range [0, 4095]. 1037 if (!Memory.OffsetImm) return true; 1038 int64_t Val = Memory.OffsetImm->getValue(); 1039 return (Val >= 0 && Val < 4096); 1040 } 1041 bool isMemImm12Offset() const { 1042 // If we have an immediate that's not a constant, treat it as a label 1043 // reference needing a fixup. 
If it is a constant, it's something else 1044 // and we reject it. 1045 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 1046 return true; 1047 1048 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1049 return false; 1050 // Immediate offset in range [-4095, 4095]. 1051 if (!Memory.OffsetImm) return true; 1052 int64_t Val = Memory.OffsetImm->getValue(); 1053 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 1054 } 1055 bool isPostIdxImm8() const { 1056 if (Kind != k_Immediate) 1057 return false; 1058 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1059 if (!CE) return false; 1060 int64_t Val = CE->getValue(); 1061 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 1062 } 1063 bool isPostIdxImm8s4() const { 1064 if (Kind != k_Immediate) 1065 return false; 1066 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1067 if (!CE) return false; 1068 int64_t Val = CE->getValue(); 1069 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 1070 (Val == INT32_MIN); 1071 } 1072 1073 bool isMSRMask() const { return Kind == k_MSRMask; } 1074 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 1075 1076 // NEON operands. 1077 bool isVecListOneD() const { 1078 if (Kind != k_VectorList) return false; 1079 return VectorList.Count == 1; 1080 } 1081 1082 bool isVecListTwoD() const { 1083 if (Kind != k_VectorList) return false; 1084 return VectorList.Count == 2; 1085 } 1086 1087 bool isVecListThreeD() const { 1088 if (Kind != k_VectorList) return false; 1089 return VectorList.Count == 3; 1090 } 1091 1092 bool isVecListFourD() const { 1093 if (Kind != k_VectorList) return false; 1094 return VectorList.Count == 4; 1095 } 1096 1097 bool isVecListTwoQ() const { 1098 if (Kind != k_VectorList) return false; 1099 //FIXME: We haven't taught the parser to handle by-two register lists 1100 // yet, so don't pretend to know one. 
1101 return VectorList.Count == 2 && false; 1102 } 1103 1104 bool isVecListOneDAllLanes() const { 1105 if (Kind != k_VectorListAllLanes) return false; 1106 return VectorList.Count == 1; 1107 } 1108 1109 bool isVecListTwoDAllLanes() const { 1110 if (Kind != k_VectorListAllLanes) return false; 1111 return VectorList.Count == 2; 1112 } 1113 1114 bool isVecListOneDByteIndexed() const { 1115 if (Kind != k_VectorListIndexed) return false; 1116 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 1117 } 1118 1119 bool isVecListTwoDByteIndexed() const { 1120 if (Kind != k_VectorListIndexed) return false; 1121 return VectorList.Count == 2 && VectorList.LaneIndex <= 7; 1122 } 1123 1124 bool isVectorIndex8() const { 1125 if (Kind != k_VectorIndex) return false; 1126 return VectorIndex.Val < 8; 1127 } 1128 bool isVectorIndex16() const { 1129 if (Kind != k_VectorIndex) return false; 1130 return VectorIndex.Val < 4; 1131 } 1132 bool isVectorIndex32() const { 1133 if (Kind != k_VectorIndex) return false; 1134 return VectorIndex.Val < 2; 1135 } 1136 1137 bool isNEONi8splat() const { 1138 if (Kind != k_Immediate) 1139 return false; 1140 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1141 // Must be a constant. 1142 if (!CE) return false; 1143 int64_t Value = CE->getValue(); 1144 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 1145 // value. 1146 return Value >= 0 && Value < 256; 1147 } 1148 1149 bool isNEONi16splat() const { 1150 if (Kind != k_Immediate) 1151 return false; 1152 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1153 // Must be a constant. 
1154 if (!CE) return false; 1155 int64_t Value = CE->getValue(); 1156 // i16 value in the range [0,255] or [0x0100, 0xff00] 1157 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1158 } 1159 1160 bool isNEONi32splat() const { 1161 if (Kind != k_Immediate) 1162 return false; 1163 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1164 // Must be a constant. 1165 if (!CE) return false; 1166 int64_t Value = CE->getValue(); 1167 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 1168 return (Value >= 0 && Value < 256) || 1169 (Value >= 0x0100 && Value <= 0xff00) || 1170 (Value >= 0x010000 && Value <= 0xff0000) || 1171 (Value >= 0x01000000 && Value <= 0xff000000); 1172 } 1173 1174 bool isNEONi32vmov() const { 1175 if (Kind != k_Immediate) 1176 return false; 1177 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1178 // Must be a constant. 1179 if (!CE) return false; 1180 int64_t Value = CE->getValue(); 1181 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1182 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1183 return (Value >= 0 && Value < 256) || 1184 (Value >= 0x0100 && Value <= 0xff00) || 1185 (Value >= 0x010000 && Value <= 0xff0000) || 1186 (Value >= 0x01000000 && Value <= 0xff000000) || 1187 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1188 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1189 } 1190 1191 bool isNEONi64splat() const { 1192 if (Kind != k_Immediate) 1193 return false; 1194 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1195 // Must be a constant. 1196 if (!CE) return false; 1197 uint64_t Value = CE->getValue(); 1198 // i64 value with each byte being either 0 or 0xff. 
1199 for (unsigned i = 0; i < 8; ++i) 1200 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1201 return true; 1202 } 1203 1204 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1205 // Add as immediates when possible. Null MCExpr = 0. 1206 if (Expr == 0) 1207 Inst.addOperand(MCOperand::CreateImm(0)); 1208 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1209 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1210 else 1211 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1212 } 1213 1214 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1215 assert(N == 2 && "Invalid number of operands!"); 1216 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1217 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; 1218 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1219 } 1220 1221 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1222 assert(N == 1 && "Invalid number of operands!"); 1223 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1224 } 1225 1226 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1227 assert(N == 1 && "Invalid number of operands!"); 1228 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1229 } 1230 1231 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1232 assert(N == 1 && "Invalid number of operands!"); 1233 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1234 } 1235 1236 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1237 assert(N == 1 && "Invalid number of operands!"); 1238 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1239 } 1240 1241 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1242 assert(N == 1 && "Invalid number of operands!"); 1243 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1244 } 1245 1246 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1247 assert(N == 1 && "Invalid number of operands!"); 1248 Inst.addOperand(MCOperand::CreateReg(getReg())); 1249 } 1250 1251 void 
addRegOperands(MCInst &Inst, unsigned N) const { 1252 assert(N == 1 && "Invalid number of operands!"); 1253 Inst.addOperand(MCOperand::CreateReg(getReg())); 1254 } 1255 1256 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1257 assert(N == 3 && "Invalid number of operands!"); 1258 assert(isRegShiftedReg() && 1259 "addRegShiftedRegOperands() on non RegShiftedReg!"); 1260 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1261 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1262 Inst.addOperand(MCOperand::CreateImm( 1263 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1264 } 1265 1266 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1267 assert(N == 2 && "Invalid number of operands!"); 1268 assert(isRegShiftedImm() && 1269 "addRegShiftedImmOperands() on non RegShiftedImm!"); 1270 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1271 Inst.addOperand(MCOperand::CreateImm( 1272 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1273 } 1274 1275 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1276 assert(N == 1 && "Invalid number of operands!"); 1277 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1278 ShifterImm.Imm)); 1279 } 1280 1281 void addRegListOperands(MCInst &Inst, unsigned N) const { 1282 assert(N == 1 && "Invalid number of operands!"); 1283 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1284 for (SmallVectorImpl<unsigned>::const_iterator 1285 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1286 Inst.addOperand(MCOperand::CreateReg(*I)); 1287 } 1288 1289 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1290 addRegListOperands(Inst, N); 1291 } 1292 1293 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1294 addRegListOperands(Inst, N); 1295 } 1296 1297 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1298 assert(N == 1 && "Invalid number of operands!"); 1299 // Encoded as val>>3. 
The printer handles display as 8, 16, 24. 1300 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1301 } 1302 1303 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1304 assert(N == 1 && "Invalid number of operands!"); 1305 // Munge the lsb/width into a bitfield mask. 1306 unsigned lsb = Bitfield.LSB; 1307 unsigned width = Bitfield.Width; 1308 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 1309 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1310 (32 - (lsb + width))); 1311 Inst.addOperand(MCOperand::CreateImm(Mask)); 1312 } 1313 1314 void addImmOperands(MCInst &Inst, unsigned N) const { 1315 assert(N == 1 && "Invalid number of operands!"); 1316 addExpr(Inst, getImm()); 1317 } 1318 1319 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1320 assert(N == 1 && "Invalid number of operands!"); 1321 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1322 } 1323 1324 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1325 assert(N == 1 && "Invalid number of operands!"); 1326 // FIXME: We really want to scale the value here, but the LDRD/STRD 1327 // instruction don't encode operands that way yet. 1328 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1329 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1330 } 1331 1332 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1333 assert(N == 1 && "Invalid number of operands!"); 1334 // The immediate is scaled by four in the encoding and is stored 1335 // in the MCInst as such. Lop off the low two bits here. 1336 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1337 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1338 } 1339 1340 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1341 assert(N == 1 && "Invalid number of operands!"); 1342 // The immediate is scaled by four in the encoding and is stored 1343 // in the MCInst as such. Lop off the low two bits here. 
1344 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1345 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1346 } 1347 1348 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1349 assert(N == 1 && "Invalid number of operands!"); 1350 // The constant encodes as the immediate-1, and we store in the instruction 1351 // the bits as encoded, so subtract off one here. 1352 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1353 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1354 } 1355 1356 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1357 assert(N == 1 && "Invalid number of operands!"); 1358 // The constant encodes as the immediate-1, and we store in the instruction 1359 // the bits as encoded, so subtract off one here. 1360 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1361 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1362 } 1363 1364 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1365 assert(N == 1 && "Invalid number of operands!"); 1366 // The constant encodes as the immediate, except for 32, which encodes as 1367 // zero. 1368 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1369 unsigned Imm = CE->getValue(); 1370 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1371 } 1372 1373 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1374 assert(N == 1 && "Invalid number of operands!"); 1375 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1376 // the instruction as well. 1377 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1378 int Val = CE->getValue(); 1379 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 1380 } 1381 1382 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 1383 assert(N == 1 && "Invalid number of operands!"); 1384 // The operand is actually a t2_so_imm, but we have its bitwise 1385 // negation in the assembly source, so twiddle it here. 
1386 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1387 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1388 } 1389 1390 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { 1391 assert(N == 1 && "Invalid number of operands!"); 1392 // The operand is actually a t2_so_imm, but we have its 1393 // negation in the assembly source, so twiddle it here. 1394 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1395 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1396 } 1397 1398 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 1399 assert(N == 1 && "Invalid number of operands!"); 1400 // The operand is actually a so_imm, but we have its bitwise 1401 // negation in the assembly source, so twiddle it here. 1402 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1403 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1404 } 1405 1406 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const { 1407 assert(N == 1 && "Invalid number of operands!"); 1408 // The operand is actually a so_imm, but we have its 1409 // negation in the assembly source, so twiddle it here. 
1410 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1411 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1412 } 1413 1414 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1415 assert(N == 1 && "Invalid number of operands!"); 1416 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1417 } 1418 1419 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1420 assert(N == 1 && "Invalid number of operands!"); 1421 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1422 } 1423 1424 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1425 assert(N == 2 && "Invalid number of operands!"); 1426 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1427 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1428 } 1429 1430 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1431 assert(N == 3 && "Invalid number of operands!"); 1432 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1433 if (!Memory.OffsetRegNum) { 1434 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1435 // Special case for #-0 1436 if (Val == INT32_MIN) Val = 0; 1437 if (Val < 0) Val = -Val; 1438 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1439 } else { 1440 // For register offset, we encode the shift type and negation flag 1441 // here. 1442 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1443 Memory.ShiftImm, Memory.ShiftType); 1444 } 1445 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1446 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1447 Inst.addOperand(MCOperand::CreateImm(Val)); 1448 } 1449 1450 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1451 assert(N == 2 && "Invalid number of operands!"); 1452 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1453 assert(CE && "non-constant AM2OffsetImm operand!"); 1454 int32_t Val = CE->getValue(); 1455 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1456 // Special case for #-0 1457 if (Val == INT32_MIN) Val = 0; 1458 if (Val < 0) Val = -Val; 1459 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1460 Inst.addOperand(MCOperand::CreateReg(0)); 1461 Inst.addOperand(MCOperand::CreateImm(Val)); 1462 } 1463 1464 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1465 assert(N == 3 && "Invalid number of operands!"); 1466 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1467 if (!Memory.OffsetRegNum) { 1468 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1469 // Special case for #-0 1470 if (Val == INT32_MIN) Val = 0; 1471 if (Val < 0) Val = -Val; 1472 Val = ARM_AM::getAM3Opc(AddSub, Val); 1473 } else { 1474 // For register offset, we encode the shift type and negation flag 1475 // here. 1476 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1477 } 1478 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1479 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1480 Inst.addOperand(MCOperand::CreateImm(Val)); 1481 } 1482 1483 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1484 assert(N == 2 && "Invalid number of operands!"); 1485 if (Kind == k_PostIndexRegister) { 1486 int32_t Val = 1487 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 1488 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1489 Inst.addOperand(MCOperand::CreateImm(Val)); 1490 return; 1491 } 1492 1493 // Constant offset. 1494 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1495 int32_t Val = CE->getValue(); 1496 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1497 // Special case for #-0 1498 if (Val == INT32_MIN) Val = 0; 1499 if (Val < 0) Val = -Val; 1500 Val = ARM_AM::getAM3Opc(AddSub, Val); 1501 Inst.addOperand(MCOperand::CreateReg(0)); 1502 Inst.addOperand(MCOperand::CreateImm(Val)); 1503 } 1504 1505 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1506 assert(N == 2 && "Invalid number of operands!"); 1507 // If we have an immediate that's not a constant, treat it as a label 1508 // reference needing a fixup. If it is a constant, it's something else 1509 // and we reject it. 1510 if (isImm()) { 1511 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1512 Inst.addOperand(MCOperand::CreateImm(0)); 1513 return; 1514 } 1515 1516 // The lower two bits are always zero and as such are not encoded. 1517 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1518 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1519 // Special case for #-0 1520 if (Val == INT32_MIN) Val = 0; 1521 if (Val < 0) Val = -Val; 1522 Val = ARM_AM::getAM5Opc(AddSub, Val); 1523 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1524 Inst.addOperand(MCOperand::CreateImm(Val)); 1525 } 1526 1527 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1528 assert(N == 2 && "Invalid number of operands!"); 1529 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1530 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1531 Inst.addOperand(MCOperand::CreateImm(Val)); 1532 } 1533 1534 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1535 assert(N == 2 && "Invalid number of operands!"); 1536 // The lower two bits are always zero and as such are not encoded. 1537 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1538 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1539 Inst.addOperand(MCOperand::CreateImm(Val)); 1540 } 1541 1542 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1543 assert(N == 2 && "Invalid number of operands!"); 1544 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1545 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1546 Inst.addOperand(MCOperand::CreateImm(Val)); 1547 } 1548 1549 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1550 addMemImm8OffsetOperands(Inst, N); 1551 } 1552 1553 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1554 addMemImm8OffsetOperands(Inst, N); 1555 } 1556 1557 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1558 assert(N == 2 && "Invalid number of operands!"); 1559 // If this is an immediate, it's a label reference. 1560 if (Kind == k_Immediate) { 1561 addExpr(Inst, getImm()); 1562 Inst.addOperand(MCOperand::CreateImm(0)); 1563 return; 1564 } 1565 1566 // Otherwise, it's a normal memory reg+offset. 1567 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1568 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1569 Inst.addOperand(MCOperand::CreateImm(Val)); 1570 } 1571 1572 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1573 assert(N == 2 && "Invalid number of operands!"); 1574 // If this is an immediate, it's a label reference. 1575 if (Kind == k_Immediate) { 1576 addExpr(Inst, getImm()); 1577 Inst.addOperand(MCOperand::CreateImm(0)); 1578 return; 1579 } 1580 1581 // Otherwise, it's a normal memory reg+offset. 1582 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // TBB (table branch byte): base register plus byte-index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // TBH (table branch halfword): base register plus halfword-index register.
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Register offset with optional shift: base reg, offset reg, and a third
  // immediate operand packing add/sub, shift amount and shift type via the
  // AM2 encoding helper.
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb2 register offset: the shift amount is passed through unpacked.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  // Thumb reg+reg addressing: just the base and offset registers.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Thumb reg+imm with the offset scaled by 4 (word-sized accesses).
  // A null OffsetImm means an offset of zero.
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb reg+imm with the offset scaled by 2 (halfword-sized accesses).
  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb reg+imm, unscaled (byte-sized accesses).
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb SP-relative addressing, offset scaled by 4.
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed 8-bit immediate: magnitude in the low bits, add/sub flag
  // in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN appears to be the sentinel for #-0 (zero offset with the
    // 'sub' flag) — TODO confirm against the immediate parser.
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // As above, but the immediate is a multiple of 4 and is stored scaled.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Post-indexed register: the register plus an add/sub flag.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  // Vector list: only the first register of the list is emitted.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  // Vector list with lane index: first register plus the lane.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Values that fit only in the high byte are shifted down and tagged
    // differently from low-byte values.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Reduce the value to 8 bits and tag which byte of the 32-bit word
    // it occupies.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // NOTE(review): the 0xc00/0xd00 tags (chosen when the low byte is
    // non-zero) presumably select the "byte plus ones" VMOV forms — confirm
    // against the NEON modified-immediate encoding tables.
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Collect the low bit of each byte of the constant into an 8-bit
    // immediate, one bit per byte.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory methods: each allocates a new operand of the corresponding kind
  // and records the source range it was parsed from.
  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // The token text is NOT copied; Str must outlive the operand.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Classify the list kind by its first register (GPR vs. DPR vs. SPR) and
  // store the register numbers sorted.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
                                              SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
                                             unsigned Index, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Ctx is currently unused.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Ctx is currently unused.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand
 *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

// Debug dump of an operand; one case per operand kind.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Then/else suffix string for each 4-bit IT mask value.
    // NOTE(review): the table has 15 entries but the assert admits mask
    // values up to 15, which would index past the end — confirm that a
    // mask of 0b1111 cannot reach this point.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print each of the three I-flags bits that is set, high to low.
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

// Public entry point used by the generic parser; reports failure by
// returning true.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  StartLoc = Parser.getTok().getLoc();
  RegNo = tryParseRegister();
  EndLoc = Parser.getTok().getLoc();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are matched case-insensitively.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
.Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Lookup is case-sensitive,
    // using the original (not lowercased) identifier.
    StringMap<unsigned>::const_iterator Entry =
      RegisterReqs.find(Tok.getIdentifier());
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
  SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted register, except RRX which is represented as a
  // shifted-immediate form.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2336bool ARMAsmParser:: 2337tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2338 SMLoc S = Parser.getTok().getLoc(); 2339 int RegNo = tryParseRegister(); 2340 if (RegNo == -1) 2341 return true; 2342 2343 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2344 2345 const AsmToken &ExclaimTok = Parser.getTok(); 2346 if (ExclaimTok.is(AsmToken::Exclaim)) { 2347 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2348 ExclaimTok.getLoc())); 2349 Parser.Lex(); // Eat exclaim token 2350 return false; 2351 } 2352 2353 // Also check for an index operand. This is only legal for vector registers, 2354 // but that'll get caught OK in operand matching, so we don't need to 2355 // explicitly filter everything else out here. 2356 if (Parser.getTok().is(AsmToken::LBrac)) { 2357 SMLoc SIdx = Parser.getTok().getLoc(); 2358 Parser.Lex(); // Eat left bracket token. 2359 2360 const MCExpr *ImmVal; 2361 if (getParser().ParseExpression(ImmVal)) 2362 return MatchOperand_ParseFail; 2363 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2364 if (!MCE) { 2365 TokError("immediate value expected for vector index"); 2366 return MatchOperand_ParseFail; 2367 } 2368 2369 SMLoc E = Parser.getTok().getLoc(); 2370 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2371 Error(E, "']' expected"); 2372 return MatchOperand_ParseFail; 2373 } 2374 2375 Parser.Lex(); // Eat right bracket token. 2376 2377 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2378 SIdx, E, 2379 getContext())); 2380 } 2381 2382 return false; 2383} 2384 2385/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2386/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2387/// "c5", ... 2388static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2389 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2390 // but efficient. 
2391 switch (Name.size()) { 2392 default: break; 2393 case 2: 2394 if (Name[0] != CoprocOp) 2395 return -1; 2396 switch (Name[1]) { 2397 default: return -1; 2398 case '0': return 0; 2399 case '1': return 1; 2400 case '2': return 2; 2401 case '3': return 3; 2402 case '4': return 4; 2403 case '5': return 5; 2404 case '6': return 6; 2405 case '7': return 7; 2406 case '8': return 8; 2407 case '9': return 9; 2408 } 2409 break; 2410 case 3: 2411 if (Name[0] != CoprocOp || Name[1] != '1') 2412 return -1; 2413 switch (Name[2]) { 2414 default: return -1; 2415 case '0': return 10; 2416 case '1': return 11; 2417 case '2': return 12; 2418 case '3': return 13; 2419 case '4': return 14; 2420 case '5': return 15; 2421 } 2422 break; 2423 } 2424 2425 return -1; 2426} 2427 2428/// parseITCondCode - Try to parse a condition code for an IT instruction. 2429ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2430parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2431 SMLoc S = Parser.getTok().getLoc(); 2432 const AsmToken &Tok = Parser.getTok(); 2433 if (!Tok.is(AsmToken::Identifier)) 2434 return MatchOperand_NoMatch; 2435 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2436 .Case("eq", ARMCC::EQ) 2437 .Case("ne", ARMCC::NE) 2438 .Case("hs", ARMCC::HS) 2439 .Case("cs", ARMCC::HS) 2440 .Case("lo", ARMCC::LO) 2441 .Case("cc", ARMCC::LO) 2442 .Case("mi", ARMCC::MI) 2443 .Case("pl", ARMCC::PL) 2444 .Case("vs", ARMCC::VS) 2445 .Case("vc", ARMCC::VC) 2446 .Case("hi", ARMCC::HI) 2447 .Case("ls", ARMCC::LS) 2448 .Case("ge", ARMCC::GE) 2449 .Case("lt", ARMCC::LT) 2450 .Case("gt", ARMCC::GT) 2451 .Case("le", ARMCC::LE) 2452 .Case("al", ARMCC::AL) 2453 .Default(~0U); 2454 if (CC == ~0U) 2455 return MatchOperand_NoMatch; 2456 Parser.Lex(); // Eat the token. 2457 2458 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2459 2460 return MatchOperand_Success; 2461} 2462 2463/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2464/// token must be an Identifier when called, and if it is a coprocessor 2465/// number, the token is eaten and the operand is added to the operand list. 2466ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2467parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2468 SMLoc S = Parser.getTok().getLoc(); 2469 const AsmToken &Tok = Parser.getTok(); 2470 if (Tok.isNot(AsmToken::Identifier)) 2471 return MatchOperand_NoMatch; 2472 2473 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2474 if (Num == -1) 2475 return MatchOperand_NoMatch; 2476 2477 Parser.Lex(); // Eat identifier token. 2478 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2479 return MatchOperand_Success; 2480} 2481 2482/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2483/// token must be an Identifier when called, and if it is a coprocessor 2484/// number, the token is eaten and the operand is added to the operand list. 2485ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2486parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2487 SMLoc S = Parser.getTok().getLoc(); 2488 const AsmToken &Tok = Parser.getTok(); 2489 if (Tok.isNot(AsmToken::Identifier)) 2490 return MatchOperand_NoMatch; 2491 2492 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2493 if (Reg == -1) 2494 return MatchOperand_NoMatch; 2495 2496 Parser.Lex(); // Eat identifier token. 2497 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2498 return MatchOperand_Success; 2499} 2500 2501/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2502/// coproc_option : '{' imm0_255 '}' 2503ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2504parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2505 SMLoc S = Parser.getTok().getLoc(); 2506 2507 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2508 if (Parser.getTok().isNot(AsmToken::LCurly)) 2509 return MatchOperand_NoMatch; 2510 Parser.Lex(); // Eat the '{' 2511 2512 const MCExpr *Expr; 2513 SMLoc Loc = Parser.getTok().getLoc(); 2514 if (getParser().ParseExpression(Expr)) { 2515 Error(Loc, "illegal expression"); 2516 return MatchOperand_ParseFail; 2517 } 2518 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2519 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2520 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2521 return MatchOperand_ParseFail; 2522 } 2523 int Val = CE->getValue(); 2524 2525 // Check for and consume the closing '}' 2526 if (Parser.getTok().isNot(AsmToken::RCurly)) 2527 return MatchOperand_ParseFail; 2528 SMLoc E = Parser.getTok().getLoc(); 2529 Parser.Lex(); // Eat the '}' 2530 2531 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2532 return MatchOperand_Success; 2533} 2534 2535// For register list parsing, we need to map from raw GPR register numbering 2536// to the enumeration values. The enumeration values aren't sorted by 2537// register number due to our using "sp", "lr" and "pc" as canonical names. 2538static unsigned getNextRegister(unsigned Reg) { 2539 // If this is a GPR, we need to do it manually, otherwise we can rely 2540 // on the sort ordering of the enumeration since the other reg-classes 2541 // are sane. 
2542 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2543 return Reg + 1; 2544 switch(Reg) { 2545 default: assert(0 && "Invalid GPR number!"); 2546 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2547 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2548 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2549 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2550 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2551 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2552 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2553 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2554 } 2555} 2556 2557// Return the low-subreg of a given Q register. 2558static unsigned getDRegFromQReg(unsigned QReg) { 2559 switch (QReg) { 2560 default: llvm_unreachable("expected a Q register!"); 2561 case ARM::Q0: return ARM::D0; 2562 case ARM::Q1: return ARM::D2; 2563 case ARM::Q2: return ARM::D4; 2564 case ARM::Q3: return ARM::D6; 2565 case ARM::Q4: return ARM::D8; 2566 case ARM::Q5: return ARM::D10; 2567 case ARM::Q6: return ARM::D12; 2568 case ARM::Q7: return ARM::D14; 2569 case ARM::Q8: return ARM::D16; 2570 case ARM::Q9: return ARM::D18; 2571 case ARM::Q10: return ARM::D20; 2572 case ARM::Q11: return ARM::D22; 2573 case ARM::Q12: return ARM::D24; 2574 case ARM::Q13: return ARM::D26; 2575 case ARM::Q14: return ARM::D28; 2576 case ARM::Q15: return ARM::D30; 2577 } 2578} 2579 2580/// Parse a register list. 2581bool ARMAsmParser:: 2582parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2583 assert(Parser.getTok().is(AsmToken::LCurly) && 2584 "Token is not a Left Curly Brace"); 2585 SMLoc S = Parser.getTok().getLoc(); 2586 Parser.Lex(); // Eat '{' token. 2587 SMLoc RegLoc = Parser.getTok().getLoc(); 2588 2589 // Check the first register in the list to see what register class 2590 // this is a list of. 
2591 int Reg = tryParseRegister(); 2592 if (Reg == -1) 2593 return Error(RegLoc, "register expected"); 2594 2595 // The reglist instructions have at most 16 registers, so reserve 2596 // space for that many. 2597 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2598 2599 // Allow Q regs and just interpret them as the two D sub-registers. 2600 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2601 Reg = getDRegFromQReg(Reg); 2602 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2603 ++Reg; 2604 } 2605 const MCRegisterClass *RC; 2606 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2607 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2608 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2609 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2610 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2611 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2612 else 2613 return Error(RegLoc, "invalid register in register list"); 2614 2615 // Store the register. 2616 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2617 2618 // This starts immediately after the first register token in the list, 2619 // so we can see either a comma or a minus (range separator) as a legal 2620 // next token. 2621 while (Parser.getTok().is(AsmToken::Comma) || 2622 Parser.getTok().is(AsmToken::Minus)) { 2623 if (Parser.getTok().is(AsmToken::Minus)) { 2624 Parser.Lex(); // Eat the minus. 2625 SMLoc EndLoc = Parser.getTok().getLoc(); 2626 int EndReg = tryParseRegister(); 2627 if (EndReg == -1) 2628 return Error(EndLoc, "register expected"); 2629 // Allow Q regs and just interpret them as the two D sub-registers. 2630 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2631 EndReg = getDRegFromQReg(EndReg) + 1; 2632 // If the register is the same as the start reg, there's nothing 2633 // more to do. 2634 if (Reg == EndReg) 2635 continue; 2636 // The register must be in the same register class as the first. 
2637 if (!RC->contains(EndReg)) 2638 return Error(EndLoc, "invalid register in register list"); 2639 // Ranges must go from low to high. 2640 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2641 return Error(EndLoc, "bad range in register list"); 2642 2643 // Add all the registers in the range to the register list. 2644 while (Reg != EndReg) { 2645 Reg = getNextRegister(Reg); 2646 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2647 } 2648 continue; 2649 } 2650 Parser.Lex(); // Eat the comma. 2651 RegLoc = Parser.getTok().getLoc(); 2652 int OldReg = Reg; 2653 const AsmToken RegTok = Parser.getTok(); 2654 Reg = tryParseRegister(); 2655 if (Reg == -1) 2656 return Error(RegLoc, "register expected"); 2657 // Allow Q regs and just interpret them as the two D sub-registers. 2658 bool isQReg = false; 2659 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2660 Reg = getDRegFromQReg(Reg); 2661 isQReg = true; 2662 } 2663 // The register must be in the same register class as the first. 2664 if (!RC->contains(Reg)) 2665 return Error(RegLoc, "invalid register in register list"); 2666 // List must be monotonically increasing. 2667 if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) 2668 return Error(RegLoc, "register list not in ascending order"); 2669 if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) { 2670 Warning(RegLoc, "duplicated register (" + RegTok.getString() + 2671 ") in register list"); 2672 continue; 2673 } 2674 // VFP register lists must also be contiguous. 2675 // It's OK to use the enumeration values directly here rather, as the 2676 // VFP register classes have the enum sorted properly. 
2677 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2678 Reg != OldReg + 1) 2679 return Error(RegLoc, "non-contiguous register range"); 2680 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2681 if (isQReg) 2682 Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc)); 2683 } 2684 2685 SMLoc E = Parser.getTok().getLoc(); 2686 if (Parser.getTok().isNot(AsmToken::RCurly)) 2687 return Error(E, "'}' expected"); 2688 Parser.Lex(); // Eat '}' token. 2689 2690 // Push the register list operand. 2691 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2692 2693 // The ARM system instruction variants for LDM/STM have a '^' token here. 2694 if (Parser.getTok().is(AsmToken::Caret)) { 2695 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc())); 2696 Parser.Lex(); // Eat '^' token. 2697 } 2698 2699 return false; 2700} 2701 2702// Helper function to parse the lane index for vector lists. 2703ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2704parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) { 2705 Index = 0; // Always return a defined index value. 2706 if (Parser.getTok().is(AsmToken::LBrac)) { 2707 Parser.Lex(); // Eat the '['. 2708 if (Parser.getTok().is(AsmToken::RBrac)) { 2709 // "Dn[]" is the 'all lanes' syntax. 2710 LaneKind = AllLanes; 2711 Parser.Lex(); // Eat the ']'. 2712 return MatchOperand_Success; 2713 } 2714 if (Parser.getTok().is(AsmToken::Integer)) { 2715 int64_t Val = Parser.getTok().getIntVal(); 2716 // Make this range check context sensitive for .8, .16, .32. 2717 if (Val < 0 && Val > 7) 2718 Error(Parser.getTok().getLoc(), "lane index out of range"); 2719 Index = Val; 2720 LaneKind = IndexedLane; 2721 Parser.Lex(); // Eat the token; 2722 if (Parser.getTok().isNot(AsmToken::RBrac)) 2723 Error(Parser.getTok().getLoc(), "']' expected"); 2724 Parser.Lex(); // Eat the ']'. 
2725 return MatchOperand_Success; 2726 } 2727 Error(Parser.getTok().getLoc(), "lane index must be empty or an integer"); 2728 return MatchOperand_ParseFail; 2729 } 2730 LaneKind = NoLanes; 2731 return MatchOperand_Success; 2732} 2733 2734// parse a vector register list 2735ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2736parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2737 VectorLaneTy LaneKind; 2738 unsigned LaneIndex; 2739 SMLoc S = Parser.getTok().getLoc(); 2740 // As an extension (to match gas), support a plain D register or Q register 2741 // (without encosing curly braces) as a single or double entry list, 2742 // respectively. 2743 if (Parser.getTok().is(AsmToken::Identifier)) { 2744 int Reg = tryParseRegister(); 2745 if (Reg == -1) 2746 return MatchOperand_NoMatch; 2747 SMLoc E = Parser.getTok().getLoc(); 2748 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) { 2749 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2750 if (Res != MatchOperand_Success) 2751 return Res; 2752 switch (LaneKind) { 2753 default: 2754 assert(0 && "unexpected lane kind!"); 2755 case NoLanes: 2756 E = Parser.getTok().getLoc(); 2757 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E)); 2758 break; 2759 case AllLanes: 2760 E = Parser.getTok().getLoc(); 2761 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E)); 2762 break; 2763 case IndexedLane: 2764 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1, 2765 LaneIndex, S,E)); 2766 break; 2767 } 2768 return MatchOperand_Success; 2769 } 2770 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2771 Reg = getDRegFromQReg(Reg); 2772 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2773 if (Res != MatchOperand_Success) 2774 return Res; 2775 switch (LaneKind) { 2776 default: 2777 assert(0 && "unexpected lane kind!"); 2778 case NoLanes: 2779 E = Parser.getTok().getLoc(); 2780 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, 
E)); 2781 break; 2782 case AllLanes: 2783 E = Parser.getTok().getLoc(); 2784 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E)); 2785 break; 2786 case IndexedLane: 2787 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2, 2788 LaneIndex, S,E)); 2789 break; 2790 } 2791 return MatchOperand_Success; 2792 } 2793 Error(S, "vector register expected"); 2794 return MatchOperand_ParseFail; 2795 } 2796 2797 if (Parser.getTok().isNot(AsmToken::LCurly)) 2798 return MatchOperand_NoMatch; 2799 2800 Parser.Lex(); // Eat '{' token. 2801 SMLoc RegLoc = Parser.getTok().getLoc(); 2802 2803 int Reg = tryParseRegister(); 2804 if (Reg == -1) { 2805 Error(RegLoc, "register expected"); 2806 return MatchOperand_ParseFail; 2807 } 2808 unsigned Count = 1; 2809 unsigned FirstReg = Reg; 2810 // The list is of D registers, but we also allow Q regs and just interpret 2811 // them as the two D sub-registers. 2812 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2813 FirstReg = Reg = getDRegFromQReg(Reg); 2814 ++Reg; 2815 ++Count; 2816 } 2817 if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success) 2818 return MatchOperand_ParseFail; 2819 2820 while (Parser.getTok().is(AsmToken::Comma) || 2821 Parser.getTok().is(AsmToken::Minus)) { 2822 if (Parser.getTok().is(AsmToken::Minus)) { 2823 Parser.Lex(); // Eat the minus. 2824 SMLoc EndLoc = Parser.getTok().getLoc(); 2825 int EndReg = tryParseRegister(); 2826 if (EndReg == -1) { 2827 Error(EndLoc, "register expected"); 2828 return MatchOperand_ParseFail; 2829 } 2830 // Allow Q regs and just interpret them as the two D sub-registers. 2831 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2832 EndReg = getDRegFromQReg(EndReg) + 1; 2833 // If the register is the same as the start reg, there's nothing 2834 // more to do. 2835 if (Reg == EndReg) 2836 continue; 2837 // The register must be in the same register class as the first. 
2838 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) { 2839 Error(EndLoc, "invalid register in register list"); 2840 return MatchOperand_ParseFail; 2841 } 2842 // Ranges must go from low to high. 2843 if (Reg > EndReg) { 2844 Error(EndLoc, "bad range in register list"); 2845 return MatchOperand_ParseFail; 2846 } 2847 // Parse the lane specifier if present. 2848 VectorLaneTy NextLaneKind; 2849 unsigned NextLaneIndex; 2850 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2851 return MatchOperand_ParseFail; 2852 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2853 Error(EndLoc, "mismatched lane index in register list"); 2854 return MatchOperand_ParseFail; 2855 } 2856 EndLoc = Parser.getTok().getLoc(); 2857 2858 // Add all the registers in the range to the register list. 2859 Count += EndReg - Reg; 2860 Reg = EndReg; 2861 continue; 2862 } 2863 Parser.Lex(); // Eat the comma. 2864 RegLoc = Parser.getTok().getLoc(); 2865 int OldReg = Reg; 2866 Reg = tryParseRegister(); 2867 if (Reg == -1) { 2868 Error(RegLoc, "register expected"); 2869 return MatchOperand_ParseFail; 2870 } 2871 // vector register lists must be contiguous. 2872 // It's OK to use the enumeration values directly here rather, as the 2873 // VFP register classes have the enum sorted properly. 2874 // 2875 // The list is of D registers, but we also allow Q regs and just interpret 2876 // them as the two D sub-registers. 2877 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2878 Reg = getDRegFromQReg(Reg); 2879 if (Reg != OldReg + 1) { 2880 Error(RegLoc, "non-contiguous register range"); 2881 return MatchOperand_ParseFail; 2882 } 2883 ++Reg; 2884 Count += 2; 2885 // Parse the lane specifier if present. 
2886 VectorLaneTy NextLaneKind; 2887 unsigned NextLaneIndex; 2888 SMLoc EndLoc = Parser.getTok().getLoc(); 2889 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2890 return MatchOperand_ParseFail; 2891 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2892 Error(EndLoc, "mismatched lane index in register list"); 2893 return MatchOperand_ParseFail; 2894 } 2895 continue; 2896 } 2897 // Normal D register. Just check that it's contiguous and keep going. 2898 if (Reg != OldReg + 1) { 2899 Error(RegLoc, "non-contiguous register range"); 2900 return MatchOperand_ParseFail; 2901 } 2902 ++Count; 2903 // Parse the lane specifier if present. 2904 VectorLaneTy NextLaneKind; 2905 unsigned NextLaneIndex; 2906 SMLoc EndLoc = Parser.getTok().getLoc(); 2907 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2908 return MatchOperand_ParseFail; 2909 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2910 Error(EndLoc, "mismatched lane index in register list"); 2911 return MatchOperand_ParseFail; 2912 } 2913 } 2914 2915 SMLoc E = Parser.getTok().getLoc(); 2916 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2917 Error(E, "'}' expected"); 2918 return MatchOperand_ParseFail; 2919 } 2920 Parser.Lex(); // Eat '}' token. 2921 2922 switch (LaneKind) { 2923 default: 2924 assert(0 && "unexpected lane kind in register list."); 2925 case NoLanes: 2926 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2927 break; 2928 case AllLanes: 2929 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count, 2930 S, E)); 2931 break; 2932 case IndexedLane: 2933 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count, 2934 LaneIndex, S, E)); 2935 break; 2936 } 2937 return MatchOperand_Success; 2938} 2939 2940/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 
2941ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2942parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2943 SMLoc S = Parser.getTok().getLoc(); 2944 const AsmToken &Tok = Parser.getTok(); 2945 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2946 StringRef OptStr = Tok.getString(); 2947 2948 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2949 .Case("sy", ARM_MB::SY) 2950 .Case("st", ARM_MB::ST) 2951 .Case("sh", ARM_MB::ISH) 2952 .Case("ish", ARM_MB::ISH) 2953 .Case("shst", ARM_MB::ISHST) 2954 .Case("ishst", ARM_MB::ISHST) 2955 .Case("nsh", ARM_MB::NSH) 2956 .Case("un", ARM_MB::NSH) 2957 .Case("nshst", ARM_MB::NSHST) 2958 .Case("unst", ARM_MB::NSHST) 2959 .Case("osh", ARM_MB::OSH) 2960 .Case("oshst", ARM_MB::OSHST) 2961 .Default(~0U); 2962 2963 if (Opt == ~0U) 2964 return MatchOperand_NoMatch; 2965 2966 Parser.Lex(); // Eat identifier token. 2967 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2968 return MatchOperand_Success; 2969} 2970 2971/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2972ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2973parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2974 SMLoc S = Parser.getTok().getLoc(); 2975 const AsmToken &Tok = Parser.getTok(); 2976 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2977 StringRef IFlagsStr = Tok.getString(); 2978 2979 // An iflags string of "none" is interpreted to mean that none of the AIF 2980 // bits are set. Not a terribly useful instruction, but a valid encoding. 
2981 unsigned IFlags = 0; 2982 if (IFlagsStr != "none") { 2983 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2984 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2985 .Case("a", ARM_PROC::A) 2986 .Case("i", ARM_PROC::I) 2987 .Case("f", ARM_PROC::F) 2988 .Default(~0U); 2989 2990 // If some specific iflag is already set, it means that some letter is 2991 // present more than once, this is not acceptable. 2992 if (Flag == ~0U || (IFlags & Flag)) 2993 return MatchOperand_NoMatch; 2994 2995 IFlags |= Flag; 2996 } 2997 } 2998 2999 Parser.Lex(); // Eat identifier token. 3000 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 3001 return MatchOperand_Success; 3002} 3003 3004/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 3005ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3006parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3007 SMLoc S = Parser.getTok().getLoc(); 3008 const AsmToken &Tok = Parser.getTok(); 3009 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 3010 StringRef Mask = Tok.getString(); 3011 3012 if (isMClass()) { 3013 // See ARMv6-M 10.1.1 3014 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 3015 .Case("apsr", 0) 3016 .Case("iapsr", 1) 3017 .Case("eapsr", 2) 3018 .Case("xpsr", 3) 3019 .Case("ipsr", 5) 3020 .Case("epsr", 6) 3021 .Case("iepsr", 7) 3022 .Case("msp", 8) 3023 .Case("psp", 9) 3024 .Case("primask", 16) 3025 .Case("basepri", 17) 3026 .Case("basepri_max", 18) 3027 .Case("faultmask", 19) 3028 .Case("control", 20) 3029 .Default(~0U); 3030 3031 if (FlagsVal == ~0U) 3032 return MatchOperand_NoMatch; 3033 3034 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 3035 // basepri, basepri_max and faultmask only valid for V7m. 3036 return MatchOperand_NoMatch; 3037 3038 Parser.Lex(); // Eat identifier token. 
3039 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3040 return MatchOperand_Success; 3041 } 3042 3043 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 3044 size_t Start = 0, Next = Mask.find('_'); 3045 StringRef Flags = ""; 3046 std::string SpecReg = Mask.slice(Start, Next).lower(); 3047 if (Next != StringRef::npos) 3048 Flags = Mask.slice(Next+1, Mask.size()); 3049 3050 // FlagsVal contains the complete mask: 3051 // 3-0: Mask 3052 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3053 unsigned FlagsVal = 0; 3054 3055 if (SpecReg == "apsr") { 3056 FlagsVal = StringSwitch<unsigned>(Flags) 3057 .Case("nzcvq", 0x8) // same as CPSR_f 3058 .Case("g", 0x4) // same as CPSR_s 3059 .Case("nzcvqg", 0xc) // same as CPSR_fs 3060 .Default(~0U); 3061 3062 if (FlagsVal == ~0U) { 3063 if (!Flags.empty()) 3064 return MatchOperand_NoMatch; 3065 else 3066 FlagsVal = 8; // No flag 3067 } 3068 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 3069 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 3070 Flags = "fc"; 3071 for (int i = 0, e = Flags.size(); i != e; ++i) { 3072 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 3073 .Case("c", 1) 3074 .Case("x", 2) 3075 .Case("s", 4) 3076 .Case("f", 8) 3077 .Default(~0U); 3078 3079 // If some specific flag is already set, it means that some letter is 3080 // present more than once, this is not acceptable. 3081 if (FlagsVal == ~0U || (FlagsVal & Flag)) 3082 return MatchOperand_NoMatch; 3083 FlagsVal |= Flag; 3084 } 3085 } else // No match for special register. 3086 return MatchOperand_NoMatch; 3087 3088 // Special register without flags is NOT equivalent to "fc" flags. 3089 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 3090 // two lines would enable gas compatibility at the expense of breaking 3091 // round-tripping. 
3092 // 3093 // if (!FlagsVal) 3094 // FlagsVal = 0x9; 3095 3096 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3097 if (SpecReg == "spsr") 3098 FlagsVal |= 16; 3099 3100 Parser.Lex(); // Eat identifier token. 3101 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3102 return MatchOperand_Success; 3103} 3104 3105ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3106parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 3107 int Low, int High) { 3108 const AsmToken &Tok = Parser.getTok(); 3109 if (Tok.isNot(AsmToken::Identifier)) { 3110 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3111 return MatchOperand_ParseFail; 3112 } 3113 StringRef ShiftName = Tok.getString(); 3114 std::string LowerOp = Op.lower(); 3115 std::string UpperOp = Op.upper(); 3116 if (ShiftName != LowerOp && ShiftName != UpperOp) { 3117 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3118 return MatchOperand_ParseFail; 3119 } 3120 Parser.Lex(); // Eat shift type token. 3121 3122 // There must be a '#' and a shift amount. 3123 if (Parser.getTok().isNot(AsmToken::Hash) && 3124 Parser.getTok().isNot(AsmToken::Dollar)) { 3125 Error(Parser.getTok().getLoc(), "'#' expected"); 3126 return MatchOperand_ParseFail; 3127 } 3128 Parser.Lex(); // Eat hash token. 
3129 3130 const MCExpr *ShiftAmount; 3131 SMLoc Loc = Parser.getTok().getLoc(); 3132 if (getParser().ParseExpression(ShiftAmount)) { 3133 Error(Loc, "illegal expression"); 3134 return MatchOperand_ParseFail; 3135 } 3136 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3137 if (!CE) { 3138 Error(Loc, "constant expression expected"); 3139 return MatchOperand_ParseFail; 3140 } 3141 int Val = CE->getValue(); 3142 if (Val < Low || Val > High) { 3143 Error(Loc, "immediate value out of range"); 3144 return MatchOperand_ParseFail; 3145 } 3146 3147 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 3148 3149 return MatchOperand_Success; 3150} 3151 3152ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3153parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3154 const AsmToken &Tok = Parser.getTok(); 3155 SMLoc S = Tok.getLoc(); 3156 if (Tok.isNot(AsmToken::Identifier)) { 3157 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3158 return MatchOperand_ParseFail; 3159 } 3160 int Val = StringSwitch<int>(Tok.getString()) 3161 .Case("be", 1) 3162 .Case("le", 0) 3163 .Default(-1); 3164 Parser.Lex(); // Eat the token. 3165 3166 if (Val == -1) { 3167 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3168 return MatchOperand_ParseFail; 3169 } 3170 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3171 getContext()), 3172 S, Parser.getTok().getLoc())); 3173 return MatchOperand_Success; 3174} 3175 3176/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3177/// instructions. Legal values are: 3178/// lsl #n 'n' in [0,31] 3179/// asr #n 'n' in [1,32] 3180/// n == 32 encoded as n == 0. 
3181ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3182parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3183 const AsmToken &Tok = Parser.getTok(); 3184 SMLoc S = Tok.getLoc(); 3185 if (Tok.isNot(AsmToken::Identifier)) { 3186 Error(S, "shift operator 'asr' or 'lsl' expected"); 3187 return MatchOperand_ParseFail; 3188 } 3189 StringRef ShiftName = Tok.getString(); 3190 bool isASR; 3191 if (ShiftName == "lsl" || ShiftName == "LSL") 3192 isASR = false; 3193 else if (ShiftName == "asr" || ShiftName == "ASR") 3194 isASR = true; 3195 else { 3196 Error(S, "shift operator 'asr' or 'lsl' expected"); 3197 return MatchOperand_ParseFail; 3198 } 3199 Parser.Lex(); // Eat the operator. 3200 3201 // A '#' and a shift amount. 3202 if (Parser.getTok().isNot(AsmToken::Hash) && 3203 Parser.getTok().isNot(AsmToken::Dollar)) { 3204 Error(Parser.getTok().getLoc(), "'#' expected"); 3205 return MatchOperand_ParseFail; 3206 } 3207 Parser.Lex(); // Eat hash token. 3208 3209 const MCExpr *ShiftAmount; 3210 SMLoc E = Parser.getTok().getLoc(); 3211 if (getParser().ParseExpression(ShiftAmount)) { 3212 Error(E, "malformed shift expression"); 3213 return MatchOperand_ParseFail; 3214 } 3215 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3216 if (!CE) { 3217 Error(E, "shift amount must be an immediate"); 3218 return MatchOperand_ParseFail; 3219 } 3220 3221 int64_t Val = CE->getValue(); 3222 if (isASR) { 3223 // Shift amount must be in [1,32] 3224 if (Val < 1 || Val > 32) { 3225 Error(E, "'asr' shift amount must be in range [1,32]"); 3226 return MatchOperand_ParseFail; 3227 } 3228 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3229 if (isThumb() && Val == 32) { 3230 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 3231 return MatchOperand_ParseFail; 3232 } 3233 if (Val == 32) Val = 0; 3234 } else { 3235 // Shift amount must be in [1,32] 3236 if (Val < 0 || Val > 31) { 3237 Error(E, "'lsr' shift amount must be in range [0,31]"); 3238 return MatchOperand_ParseFail; 3239 } 3240 } 3241 3242 E = Parser.getTok().getLoc(); 3243 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 3244 3245 return MatchOperand_Success; 3246} 3247 3248/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3249/// of instructions. Legal values are: 3250/// ror #n 'n' in {0, 8, 16, 24} 3251ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3252parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3253 const AsmToken &Tok = Parser.getTok(); 3254 SMLoc S = Tok.getLoc(); 3255 if (Tok.isNot(AsmToken::Identifier)) 3256 return MatchOperand_NoMatch; 3257 StringRef ShiftName = Tok.getString(); 3258 if (ShiftName != "ror" && ShiftName != "ROR") 3259 return MatchOperand_NoMatch; 3260 Parser.Lex(); // Eat the operator. 3261 3262 // A '#' and a rotate amount. 3263 if (Parser.getTok().isNot(AsmToken::Hash) && 3264 Parser.getTok().isNot(AsmToken::Dollar)) { 3265 Error(Parser.getTok().getLoc(), "'#' expected"); 3266 return MatchOperand_ParseFail; 3267 } 3268 Parser.Lex(); // Eat hash token. 
  const MCExpr *ShiftAmount;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(ShiftAmount)) {
    Error(E, "malformed rotate expression");
    return MatchOperand_ParseFail;
  }
  // The rotate amount must fold to a compile-time constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE) {
    Error(E, "rotate amount must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Val = CE->getValue();
  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
  // normally, zero is represented in asm by omitting the rotate operand
  // entirely.
  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
    Error(E, "'ror' rotate amount must be 8, 16, or 24");
    return MatchOperand_ParseFail;
  }

  E = Parser.getTok().getLoc();
  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));

  return MatchOperand_Success;
}

/// parseBitfield - Parse the bitfield descriptor for BFI/BFC/SBFX/UBFX:
/// "#<lsb>, #<width>" with lsb in [0,31] and width in [1,32-lsb]. Pushes a
/// single Bitfield operand holding both values.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.
  const MCExpr *WidthExpr;
  if (getParser().ParseExpression(WidthExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));

  return MatchOperand_Success;
}

ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // postidx_reg := '+' register {, shift}
  //              | '-' register {, shift}
  //              | register {, shift}

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  bool haveEaten = false;
  bool isAdd = true;   // '+' (or no sign) means add; '-' means subtract.
  int Reg = -1;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    // A bare '+'/'-' was consumed, so a register is now mandatory.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  // An optional shift follows the register.
  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
      return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
                                                  ShiftImm, S, E));

  return MatchOperand_Success;
}

ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat the '#'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
3434 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3435 const MCExpr *Offset; 3436 if (getParser().ParseExpression(Offset)) 3437 return MatchOperand_ParseFail; 3438 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3439 if (!CE) { 3440 Error(S, "constant expression expected"); 3441 return MatchOperand_ParseFail; 3442 } 3443 SMLoc E = Tok.getLoc(); 3444 // Negative zero is encoded as the flag value INT32_MIN. 3445 int32_t Val = CE->getValue(); 3446 if (isNegative && Val == 0) 3447 Val = INT32_MIN; 3448 3449 Operands.push_back( 3450 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3451 3452 return MatchOperand_Success; 3453 } 3454 3455 3456 bool haveEaten = false; 3457 bool isAdd = true; 3458 int Reg = -1; 3459 if (Tok.is(AsmToken::Plus)) { 3460 Parser.Lex(); // Eat the '+' token. 3461 haveEaten = true; 3462 } else if (Tok.is(AsmToken::Minus)) { 3463 Parser.Lex(); // Eat the '-' token. 3464 isAdd = false; 3465 haveEaten = true; 3466 } 3467 if (Parser.getTok().is(AsmToken::Identifier)) 3468 Reg = tryParseRegister(); 3469 if (Reg == -1) { 3470 if (!haveEaten) 3471 return MatchOperand_NoMatch; 3472 Error(Parser.getTok().getLoc(), "register expected"); 3473 return MatchOperand_ParseFail; 3474 } 3475 SMLoc E = Parser.getTok().getLoc(); 3476 3477 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3478 0, S, E)); 3479 3480 return MatchOperand_Success; 3481} 3482 3483/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3484/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3485/// when they refer multiple MIOperands inside a single one. 3486bool ARMAsmParser:: 3487cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3488 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3489 // Rt, Rt2 3490 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3491 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3492 // Create a writeback register dummy placeholder. 
3493 Inst.addOperand(MCOperand::CreateReg(0)); 3494 // addr 3495 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3496 // pred 3497 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3498 return true; 3499} 3500 3501/// cvtT2StrdPre - Convert parsed operands to MCInst. 3502/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3503/// when they refer multiple MIOperands inside a single one. 3504bool ARMAsmParser:: 3505cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3506 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3507 // Create a writeback register dummy placeholder. 3508 Inst.addOperand(MCOperand::CreateReg(0)); 3509 // Rt, Rt2 3510 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3511 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3512 // addr 3513 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3514 // pred 3515 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3516 return true; 3517} 3518 3519/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3520/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3521/// when they refer multiple MIOperands inside a single one. 3522bool ARMAsmParser:: 3523cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3524 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3525 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3526 3527 // Create a writeback register dummy placeholder. 3528 Inst.addOperand(MCOperand::CreateImm(0)); 3529 3530 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3531 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3532 return true; 3533} 3534 3535/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3536/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3537/// when they refer multiple MIOperands inside a single one. 
3538bool ARMAsmParser:: 3539cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3540 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3541 // Create a writeback register dummy placeholder. 3542 Inst.addOperand(MCOperand::CreateImm(0)); 3543 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3544 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3545 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3546 return true; 3547} 3548 3549/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3550/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3551/// when they refer multiple MIOperands inside a single one. 3552bool ARMAsmParser:: 3553cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3554 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3555 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3556 3557 // Create a writeback register dummy placeholder. 3558 Inst.addOperand(MCOperand::CreateImm(0)); 3559 3560 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3561 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3562 return true; 3563} 3564 3565/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3566/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3567/// when they refer multiple MIOperands inside a single one. 3568bool ARMAsmParser:: 3569cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3570 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3571 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3572 3573 // Create a writeback register dummy placeholder. 3574 Inst.addOperand(MCOperand::CreateImm(0)); 3575 3576 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3577 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3578 return true; 3579} 3580 3581 3582/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3583/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3584/// when they refer multiple MIOperands inside a single one. 3585bool ARMAsmParser:: 3586cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3587 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3588 // Create a writeback register dummy placeholder. 3589 Inst.addOperand(MCOperand::CreateImm(0)); 3590 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3591 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3592 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3593 return true; 3594} 3595 3596/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3597/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3598/// when they refer multiple MIOperands inside a single one. 3599bool ARMAsmParser:: 3600cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3601 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3602 // Create a writeback register dummy placeholder. 3603 Inst.addOperand(MCOperand::CreateImm(0)); 3604 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3605 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3606 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3607 return true; 3608} 3609 3610/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3611/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3612/// when they refer multiple MIOperands inside a single one. 3613bool ARMAsmParser:: 3614cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3615 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3616 // Create a writeback register dummy placeholder. 
3617 Inst.addOperand(MCOperand::CreateImm(0)); 3618 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3619 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3620 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3621 return true; 3622} 3623 3624/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3625/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3626/// when they refer multiple MIOperands inside a single one. 3627bool ARMAsmParser:: 3628cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3629 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3630 // Rt 3631 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3632 // Create a writeback register dummy placeholder. 3633 Inst.addOperand(MCOperand::CreateImm(0)); 3634 // addr 3635 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3636 // offset 3637 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3638 // pred 3639 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3640 return true; 3641} 3642 3643/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3644/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3645/// when they refer multiple MIOperands inside a single one. 3646bool ARMAsmParser:: 3647cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3648 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3649 // Rt 3650 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3651 // Create a writeback register dummy placeholder. 3652 Inst.addOperand(MCOperand::CreateImm(0)); 3653 // addr 3654 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3655 // offset 3656 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3657 // pred 3658 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3659 return true; 3660} 3661 3662/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3663/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3664/// when they refer multiple MIOperands inside a single one. 3665bool ARMAsmParser:: 3666cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3667 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3668 // Create a writeback register dummy placeholder. 3669 Inst.addOperand(MCOperand::CreateImm(0)); 3670 // Rt 3671 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3672 // addr 3673 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3674 // offset 3675 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3676 // pred 3677 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3678 return true; 3679} 3680 3681/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3682/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3683/// when they refer multiple MIOperands inside a single one. 3684bool ARMAsmParser:: 3685cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3686 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3687 // Create a writeback register dummy placeholder. 3688 Inst.addOperand(MCOperand::CreateImm(0)); 3689 // Rt 3690 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3691 // addr 3692 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3693 // offset 3694 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3695 // pred 3696 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3697 return true; 3698} 3699 3700/// cvtLdrdPre - Convert parsed operands to MCInst. 3701/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3702/// when they refer multiple MIOperands inside a single one. 
3703bool ARMAsmParser:: 3704cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3705 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3706 // Rt, Rt2 3707 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3708 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3709 // Create a writeback register dummy placeholder. 3710 Inst.addOperand(MCOperand::CreateImm(0)); 3711 // addr 3712 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3713 // pred 3714 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3715 return true; 3716} 3717 3718/// cvtStrdPre - Convert parsed operands to MCInst. 3719/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3720/// when they refer multiple MIOperands inside a single one. 3721bool ARMAsmParser:: 3722cvtStrdPre(MCInst &Inst, unsigned Opcode, 3723 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3724 // Create a writeback register dummy placeholder. 3725 Inst.addOperand(MCOperand::CreateImm(0)); 3726 // Rt, Rt2 3727 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3728 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3729 // addr 3730 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3731 // pred 3732 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3733 return true; 3734} 3735 3736/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3737/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3738/// when they refer multiple MIOperands inside a single one. 3739bool ARMAsmParser:: 3740cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3741 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3742 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3743 // Create a writeback register dummy placeholder. 
3744 Inst.addOperand(MCOperand::CreateImm(0)); 3745 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3746 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3747 return true; 3748} 3749 3750/// cvtThumbMultiple- Convert parsed operands to MCInst. 3751/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3752/// when they refer multiple MIOperands inside a single one. 3753bool ARMAsmParser:: 3754cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3755 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3756 // The second source operand must be the same register as the destination 3757 // operand. 3758 if (Operands.size() == 6 && 3759 (((ARMOperand*)Operands[3])->getReg() != 3760 ((ARMOperand*)Operands[5])->getReg()) && 3761 (((ARMOperand*)Operands[3])->getReg() != 3762 ((ARMOperand*)Operands[4])->getReg())) { 3763 Error(Operands[3]->getStartLoc(), 3764 "destination register must match source register"); 3765 return false; 3766 } 3767 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3768 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3769 // If we have a three-operand form, make sure to set Rn to be the operand 3770 // that isn't the same as Rd. 3771 unsigned RegOp = 4; 3772 if (Operands.size() == 6 && 3773 ((ARMOperand*)Operands[4])->getReg() == 3774 ((ARMOperand*)Operands[3])->getReg()) 3775 RegOp = 5; 3776 ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1); 3777 Inst.addOperand(Inst.getOperand(0)); 3778 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3779 3780 return true; 3781} 3782 3783bool ARMAsmParser:: 3784cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 3785 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3786 // Vd 3787 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3788 // Create a writeback register dummy placeholder. 
3789 Inst.addOperand(MCOperand::CreateImm(0)); 3790 // Vn 3791 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3792 // pred 3793 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3794 return true; 3795} 3796 3797bool ARMAsmParser:: 3798cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 3799 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3800 // Vd 3801 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3802 // Create a writeback register dummy placeholder. 3803 Inst.addOperand(MCOperand::CreateImm(0)); 3804 // Vn 3805 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3806 // Vm 3807 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3808 // pred 3809 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3810 return true; 3811} 3812 3813bool ARMAsmParser:: 3814cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 3815 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3816 // Create a writeback register dummy placeholder. 3817 Inst.addOperand(MCOperand::CreateImm(0)); 3818 // Vn 3819 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3820 // Vt 3821 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3822 // pred 3823 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3824 return true; 3825} 3826 3827bool ARMAsmParser:: 3828cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 3829 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3830 // Create a writeback register dummy placeholder. 3831 Inst.addOperand(MCOperand::CreateImm(0)); 3832 // Vn 3833 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3834 // Vm 3835 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3836 // Vt 3837 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); 3838 // pred 3839 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3840 return true; 3841} 3842 3843/// Parse an ARM memory expression, return false if successful else return true 3844/// or an error. 
/// The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Bare '[Rn]' form: no offset register, no immediate, no shift.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // The specifier is given in bits in the source; store it in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
    case 64:  Align = 8;  break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// Returns false on success (shift parsed into St/Amount), true on error.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  // "asl" is accepted as a synonym for "lsl".
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    // Parse as a double and check it is exactly representable in the 8-bit
    // VFP immediate encoding.
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    // An integer here is the raw 8-bit encoded value.
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse an ARM instruction operand.  For now this parses the operand
/// regardless of the mnemonic.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If this is VMRS, check for the apsr_nzcv operand.
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (CE) {
      // '#-0' is encoded as the flag value INT32_MIN.
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    }
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}

// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
//  :lower16: and :upper16:.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // Note: for mnemonics shorter than two characters the substr start index
    // wraps, but StringRef::substr clamps it to the length, yielding an empty
    // string and falling through to the Default case.
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::
getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                      bool &CanAcceptPredicationCode) {
  // Data-processing mnemonics that have a flag-setting ('s' suffix) form.
  // Some of the ARM-only forms (smull/mov/mla/...) are excluded in Thumb.
  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" ||
      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
      Mnemonic == "orr" || Mnemonic == "mvn" ||
      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
                      Mnemonic == "mla" || Mnemonic == "smlal" ||
                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
    CanAcceptCarrySet = true;
  } else
    CanAcceptCarrySet = false;

  // Mnemonics that never take a condition code suffix (or don't in the
  // current sub-target mode).
  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
      (Mnemonic == "clrex" && !isThumb()) ||
      (Mnemonic == "nop" && isThumbOne()) ||
      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
       !isThumb()) ||
      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
    CanAcceptPredicationCode = false;
  } else
    CanAcceptPredicationCode = true;

  if (isThumb()) {
    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
      CanAcceptPredicationCode = false;
  }
}

// Determine whether the defaulted (non-flag-setting) cc_out operand that
// was speculatively added after the mnemonic should be removed, based on
// spot-checks of the explicit operands already parsed. Returning true
// means the caller erases Operands[1] (the CCOut operand).
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                     SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

// Return true if Tok is one of the NEON/VFP data-type suffix tokens that can
// trail a mnemonic (".8", ".i32", ".f64", etc.).
static bool isDataTypeToken(StringRef Tok) {
  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
         Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
         Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
         Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
         Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
         Tok == ".f" || Tok == ".d";
}

// FIXME: This bit should probably be handled via an explicit match class
// in the .td files that matches the suffix instead of having it be
// a literal string token the way it is now.
// Return true if Mnemonic simply ignores a trailing data-type suffix
// (currently only the vldm/vstm families). DT is unused here.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffices and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  unsigned AvailableFeatures = getAvailableFeatures();
  applyMnemonicAliases(Name, AvailableFeatures);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    unsigned Mask = 8;
    // Walk the mask string right-to-left; each 't' sets the new high bit,
    // each 'e' leaves it clear.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' needs special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here. Take care not to convert those
  // that should hit the Thumb2 encoding.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0 &&
        (isThumbOne() ||
         // The cc_out operand matches the IT block.
         ((inITBlock() != CarrySetting) &&
         // Neither register operand is a high register.
          (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
           isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for an opcode directly from the tblgen'erated
// ARMInsts table.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

// Map a VST1LN/VST2LN pseudo "Asm" opcode (one per data-type suffix) onto
// the real instruction opcode it encodes as.
// NOTE(review): the assert below compiles away in release (NDEBUG) builds,
// leaving a fall-off-the-end return for unexpected opcodes — consider
// llvm_unreachable instead.
static unsigned getRealVSTLNOpcode(unsigned Opc) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
    return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
    return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
    return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
  case ARM::VST2LNdWB_fixed_Asm_U16:
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
  case ARM::VST2LNdWB_register_Asm_U16:
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
  case ARM::VST2LNdAsm_U8:
    return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16:
    return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
    return ARM::VST2LNd32;
  }
}

// Map a VLD1LN/VLD2LN pseudo "Asm" opcode (one per data-type suffix) onto
// the real instruction opcode it encodes as.
// NOTE(review): same release-build concern as getRealVSTLNOpcode above.
static unsigned getRealVLDLNOpcode(unsigned Opc) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VLD1LN
  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
  case ARM::VLD1LNdWB_fixed_Asm_U8:
    return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
  case ARM::VLD1LNdWB_fixed_Asm_U16:
    return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
    return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
  case ARM::VLD1LNdWB_register_Asm_U8:
    return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
  case ARM::VLD1LNdWB_register_Asm_U16:
    return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
    return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
  case ARM::VLD1LNdAsm_U8:
    return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
  case ARM::VLD1LNdAsm_U16:
    return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
    return ARM::VLD1LNd32;

  // VLD2LN
  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
  case ARM::VLD2LNdWB_fixed_Asm_U8:
    return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
  case ARM::VLD2LNdWB_fixed_Asm_U16:
    return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
    return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
  case ARM::VLD2LNdWB_register_Asm_U8:
    return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
  case ARM::VLD2LNdWB_register_Asm_U16:
    return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
    return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
  case ARM::VLD2LNdAsm_U8:
    return ARM::VLD2LNd8;
  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
  case ARM::VLD2LNdAsm_U16:
    return ARM::VLD2LNd16;
  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
    return ARM::VLD2LNd32;
  }
}

bool ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  // Handle NEON VST complex aliases.
  // Single-lane VST1, register-offset writeback form. The "Asm" pseudo has
  // operands in assembly order (Vd, lane, Rn, align, Rm, pred); the real
  // instruction wants the base/writeback operands first.
  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  // Single-lane VST2, register-offset writeback form. Same as above, plus a
  // second data register.
  case ARM::VST2LNdWB_register_Asm_8: case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8: case ARM::VST2LNdWB_register_Asm_16:
  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
  case ARM::VST2LNdWB_register_Asm_32: case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Second register of the pair, synthesized as Vd+1.
    // NOTE(review): relies on consecutive D-register enum values — confirm.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  // Single-lane VST1, fixed-increment writeback form (no Rm; a zero register
  // is inserted in its place).
  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // Single-lane VST2, fixed-increment writeback form.
  case ARM::VST2LNdWB_fixed_Asm_8: case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8: case ARM::VST2LNdWB_fixed_Asm_16:
  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
  case ARM::VST2LNdWB_fixed_Asm_32: case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Second register of the pair (Vd+1); see NOTE above on enum ordering.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // Single-lane VST1, no writeback.
  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
  case ARM::VST1LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // Single-lane VST2, no writeback.
  case ARM::VST2LNdAsm_8: case ARM::VST2LNdAsm_P8: case ARM::VST2LNdAsm_I8:
  case ARM::VST2LNdAsm_S8: case ARM::VST2LNdAsm_U8: case ARM::VST2LNdAsm_16:
  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32: case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
  case ARM::VST2LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Second register of the pair (Vd+1); see NOTE above on enum ordering.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // Handle NEON VLD complex aliases. The loads additionally carry the
  // tied source operand(s) for the lanes that are not written.
  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_register_Asm_8: case ARM::VLD2LNdWB_register_Asm_P8:
  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
  case ARM::VLD2LNdWB_register_Asm_U8: case ARM::VLD2LNdWB_register_Asm_16:
  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
  case ARM::VLD2LNdWB_register_Asm_32: case ARM::VLD2LNdWB_register_Asm_F:
  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Second destination register (Vd+1); see NOTE above on enum ordering.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_fixed_Asm_8: case ARM::VLD2LNdWB_fixed_Asm_P8:
  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
  case ARM::VLD2LNdWB_fixed_Asm_U8: case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
  case ARM::VLD2LNdWB_fixed_Asm_32: case ARM::VLD2LNdWB_fixed_Asm_F:
  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Second destination register (Vd+1); see NOTE above on enum ordering.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD1LNdAsm_8: case ARM::VLD1LNdAsm_P8: case ARM::VLD1LNdAsm_I8:
  case ARM::VLD1LNdAsm_S8: case ARM::VLD1LNdAsm_U8: case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32: case ARM::VLD1LNdAsm_F:
  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
  case ARM::VLD1LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdAsm_8: case ARM::VLD2LNdAsm_P8: case ARM::VLD2LNdAsm_I8:
  case ARM::VLD2LNdAsm_S8: case ARM::VLD2LNdAsm_U8: case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32: case ARM::VLD2LNdAsm_F:
  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
  case ARM::VLD2LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // Handle the Thumb2 mode MOV complex aliases.
  case ARM::t2MOVsi:
  case ARM::t2MOVSsi: {
    // Which instruction to expand to depends on the CCOut operand and
    // whether we're in an IT block if the register operands are low
    // registers.
5439 bool isNarrow = false; 5440 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 5441 isARMLowRegister(Inst.getOperand(1).getReg()) && 5442 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi)) 5443 isNarrow = true; 5444 MCInst TmpInst; 5445 unsigned newOpc; 5446 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) { 5447 default: llvm_unreachable("unexpected opcode!"); 5448 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; 5449 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; 5450 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break; 5451 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; 5452 } 5453 unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); 5454 if (Ammount == 32) Ammount = 0; 5455 TmpInst.setOpcode(newOpc); 5456 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5457 if (isNarrow) 5458 TmpInst.addOperand(MCOperand::CreateReg( 5459 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 5460 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5461 TmpInst.addOperand(MCOperand::CreateImm(Ammount)); 5462 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 5463 TmpInst.addOperand(Inst.getOperand(4)); 5464 if (!isNarrow) 5465 TmpInst.addOperand(MCOperand::CreateReg( 5466 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 5467 Inst = TmpInst; 5468 return true; 5469 } 5470 // Handle the ARM mode MOV complex aliases. 5471 case ARM::ASRr: 5472 case ARM::LSRr: 5473 case ARM::LSLr: 5474 case ARM::RORr: { 5475 ARM_AM::ShiftOpc ShiftTy; 5476 switch(Inst.getOpcode()) { 5477 default: llvm_unreachable("unexpected opcode!"); 5478 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 5479 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 5480 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 5481 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 5482 } 5483 // A shift by zero is a plain MOVr, not a MOVsi. 
5484 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 5485 MCInst TmpInst; 5486 TmpInst.setOpcode(ARM::MOVsr); 5487 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5488 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5489 TmpInst.addOperand(Inst.getOperand(2)); // Rm 5490 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5491 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 5492 TmpInst.addOperand(Inst.getOperand(4)); 5493 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 5494 Inst = TmpInst; 5495 return true; 5496 } 5497 case ARM::ASRi: 5498 case ARM::LSRi: 5499 case ARM::LSLi: 5500 case ARM::RORi: { 5501 ARM_AM::ShiftOpc ShiftTy; 5502 switch(Inst.getOpcode()) { 5503 default: llvm_unreachable("unexpected opcode!"); 5504 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 5505 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 5506 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 5507 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 5508 } 5509 // A shift by zero is a plain MOVr, not a MOVsi. 5510 unsigned Amt = Inst.getOperand(2).getImm(); 5511 unsigned Opc = Amt == 0 ? 
ARM::MOVr : ARM::MOVsi; 5512 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 5513 MCInst TmpInst; 5514 TmpInst.setOpcode(Opc); 5515 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5516 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5517 if (Opc == ARM::MOVsi) 5518 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5519 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 5520 TmpInst.addOperand(Inst.getOperand(4)); 5521 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 5522 Inst = TmpInst; 5523 return true; 5524 } 5525 case ARM::RRXi: { 5526 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 5527 MCInst TmpInst; 5528 TmpInst.setOpcode(ARM::MOVsi); 5529 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5530 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5531 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5532 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5533 TmpInst.addOperand(Inst.getOperand(3)); 5534 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 5535 Inst = TmpInst; 5536 return true; 5537 } 5538 case ARM::t2LDMIA_UPD: { 5539 // If this is a load of a single register, then we should use 5540 // a post-indexed LDR instruction instead, per the ARM ARM. 5541 if (Inst.getNumOperands() != 5) 5542 return false; 5543 MCInst TmpInst; 5544 TmpInst.setOpcode(ARM::t2LDR_POST); 5545 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5546 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5547 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5548 TmpInst.addOperand(MCOperand::CreateImm(4)); 5549 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5550 TmpInst.addOperand(Inst.getOperand(3)); 5551 Inst = TmpInst; 5552 return true; 5553 } 5554 case ARM::t2STMDB_UPD: { 5555 // If this is a store of a single register, then we should use 5556 // a pre-indexed STR instruction instead, per the ARM ARM. 
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2STR_PRE);
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(-4));
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      // NOTE(review): unlike the LDMIA_UPD/'pop' case above this does not
      // return true after rewriting — confirm that is intentional.
    }
    break;
  case ARM::t2ADDri12:
    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
    // mnemonic was used (not "addw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2ADDri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::t2SUBri12:
    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
    // mnemonic was used (not "subw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2SUBri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tADDi3);
      return true;
    }
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tSUBi3);
      return true;
    }
    break;
  case ARM::t2ADDrr: {
    // If the destination and first source operand are the same, and
    // there's no setting of the flags, use encoding T2 instead of T3.
    // Note that this is only for ADD, not SUB. This mirrors the system
    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
        Inst.getOperand(5).getReg() != 0 ||
        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
      break;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::tADDhirr);
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(2));
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
      Inst.setOpcode(ARM::tBcc);
      return true;
    }
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
      Inst.setOpcode(ARM::t2Bcc);
      return true;
    }
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
      Inst.setOpcode(ARM::t2B);
      return true;
    }
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
      Inst.setOpcode(ARM::tB);
      return true;
    }
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
      return true;
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
      return true;
    }
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2LDMIA_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2STMDB_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
         Inst.getOperand(4).getReg() == ARM::CPSR) ||
        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == ARMCC::AL &&
        Inst.getOperand(4).getReg() == ARM::CPSR &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't the same for tMOV[S]r... (no cc_out)
      MCInst TmpInst;
      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2SXTH:
  case ARM::t2SXTB:
  case ARM::t2UXTH:
  case ARM::t2UXTB: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == 0 &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("Illegal opcode!");
      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
      }
      // The operands aren't the same for thumb1 (no rotate operand).
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2IT: {
    // The mask bits for all but the first condition are represented as
    // the low bit of the condition code value implies 't'. We currently
    // always have 1 implies 't', so XOR toggle the bits if the low bit
    // of the condition code is zero. The encoding also expects the low
    // bit of the condition to be encoded as bit 4 of the mask operand,
    // so mask that in if needed
    MCOperand &MO = Inst.getOperand(1);
    unsigned Mask = MO.getImm();
    unsigned OrigMask = Mask;
    unsigned TZ = CountTrailingZeros_32(Mask);
    if ((Inst.getOperand(0).getImm() & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      // Toggle the 't'/'e' bits above the terminating 1 for an 'else'
      // base condition.
      for (unsigned i = 3; i != TZ; --i)
        Mask ^= 1 << i;
    } else
      Mask |= 0x10;
    MO.setImm(Mask);

    // Set up the IT block state according to the IT instruction we just
    // matched.
    assert(!inITBlock() && "nested IT blocks?!");
    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
    ITState.CurPosition = 0;
    ITState.FirstCond = true;
    break;
  }
  }
  return false;
}

unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
5864 unsigned Opc = Inst.getOpcode(); 5865 const MCInstrDesc &MCID = getInstDesc(Opc); 5866 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 5867 assert(MCID.hasOptionalDef() && 5868 "optionally flag setting instruction missing optional def operand"); 5869 assert(MCID.NumOperands == Inst.getNumOperands() && 5870 "operand count mismatch!"); 5871 // Find the optional-def operand (cc_out). 5872 unsigned OpNo; 5873 for (OpNo = 0; 5874 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 5875 ++OpNo) 5876 ; 5877 // If we're parsing Thumb1, reject it completely. 5878 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 5879 return Match_MnemonicFail; 5880 // If we're parsing Thumb2, which form is legal depends on whether we're 5881 // in an IT block. 5882 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 5883 !inITBlock()) 5884 return Match_RequiresITBlock; 5885 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 5886 inITBlock()) 5887 return Match_RequiresNotITBlock; 5888 } 5889 // Some high-register supporting Thumb1 encodings only allow both registers 5890 // to be from r0-r7 when in Thumb2. 5891 else if (Opc == ARM::tADDhirr && isThumbOne() && 5892 isARMLowRegister(Inst.getOperand(1).getReg()) && 5893 isARMLowRegister(Inst.getOperand(2).getReg())) 5894 return Match_RequiresThumb2; 5895 // Others only require ARMv6 or later. 
5896 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 5897 isARMLowRegister(Inst.getOperand(0).getReg()) && 5898 isARMLowRegister(Inst.getOperand(1).getReg())) 5899 return Match_RequiresV6; 5900 return Match_Success; 5901} 5902 5903bool ARMAsmParser:: 5904MatchAndEmitInstruction(SMLoc IDLoc, 5905 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 5906 MCStreamer &Out) { 5907 MCInst Inst; 5908 unsigned ErrorInfo; 5909 unsigned MatchResult; 5910 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 5911 switch (MatchResult) { 5912 default: break; 5913 case Match_Success: 5914 // Context sensitive operand constraints aren't handled by the matcher, 5915 // so check them here. 5916 if (validateInstruction(Inst, Operands)) { 5917 // Still progress the IT block, otherwise one wrong condition causes 5918 // nasty cascading errors. 5919 forwardITPosition(); 5920 return true; 5921 } 5922 5923 // Some instructions need post-processing to, for example, tweak which 5924 // encoding is selected. Loop on it while changes happen so the 5925 // individual transformations can chain off each other. E.g., 5926 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 5927 while (processInstruction(Inst, Operands)) 5928 ; 5929 5930 // Only move forward at the very end so that everything in validate 5931 // and process gets a consistent answer about whether we're in an IT 5932 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // Point the diagnostic at the offending operand when the matcher
    // identified one; otherwise fall back to the instruction location.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emited a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    return parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    return parseDirectiveUnreq(DirectiveID.getLoc());
  // Returning true lets the generic parser handle any directive we don't
  // recognize here.
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic. Only a comma may separate expressions.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // Switch the subtarget into Thumb mode if not already there.
  if (!isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  return false;
}

/// parseDirectiveARM
///  ::= .arm
bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // Switch the subtarget into ARM mode if currently in Thumb mode.
  if (isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has the function name after the .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if
       (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getIdentifier();
    Parser.Lex(); // Consume the identifier token.
  }

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // FIXME: assuming function name will be the line following .thumb_func
  if (!isMachO) {
    Name = Parser.getTok().getIdentifier();
  }

  // Mark symbol as a thumb symbol.
  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
  getParser().getStreamer().EmitThumbFunc(Func);
  return false;
}

/// parseDirectiveSyntax
///  ::= .syntax unified | divided
bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return Error(L, "unexpected token in .syntax directive");
  StringRef Mode = Tok.getString();
  // Only unified syntax is supported; divided syntax is rejected outright.
  if (Mode == "unified" || Mode == "UNIFIED")
    Parser.Lex();
  else if (Mode == "divided" || Mode == "DIVIDED")
    return Error(L, "'.syntax divided' arm asssembly not supported");
  else
    return Error(L, "unrecognized syntax mode in .syntax directive");

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
  Parser.Lex();

  // TODO tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveCode
///  ::= .code 16 | 32
bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Integer))
    return Error(L, "unexpected token in .code directive");
  int64_t Val = Parser.getTok().getIntVal();
  if (Val == 16)
    Parser.Lex();
  else if (Val == 32)
    Parser.Lex();
  else
    return Error(L, "invalid operand to .code directive");

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
  Parser.Lex();

  // ".code 16" selects Thumb, ".code 32" selects ARM; switch modes only when
  // the current mode differs, then record the flag in the streamer.
  if (Val == 16) {
    if (!isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  } else {
    if (isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  }

  return false;
}

/// parseDirectiveReq
///  ::= name .req registername
bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Parser.Lex(); // Eat the '.req' token.
  unsigned Reg;
  SMLoc SRegLoc, ERegLoc;
  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
    Parser.EatToEndOfStatement();
    return Error(SRegLoc, "register name expected");
  }

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Parser.EatToEndOfStatement();
    return Error(Parser.getTok().getLoc(),
                 "unexpected input in .req directive.");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Re-registering the same alias to the same register is allowed; a
  // conflicting redefinition is an error.
  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
    return Error(SRegLoc, "redefinition of '" + Name +
                 "' does not match original.");

  return false;
}

/// parseDirectiveUnreq
///  ::= .unreq registername
bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Parser.EatToEndOfStatement();
    return Error(L, "unexpected input in .unreq directive.");
  }
  // Removing an alias that was never defined is a harmless no-op.
  RegisterReqs.erase(Parser.getTok().getIdentifier());
  Parser.Lex(); // Eat the identifier.
  return false;
}

extern "C" void LLVMInitializeARMAsmLexer();

/// Force static initialization.
6170extern "C" void LLVMInitializeARMAsmParser() { 6171 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 6172 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 6173 LLVMInitializeARMAsmLexer(); 6174} 6175 6176#define GET_REGISTER_MATCHER 6177#define GET_MATCHER_IMPLEMENTATION 6178#include "ARMGenAsmMatcher.inc" 6179