// ARMAsmParser.cpp -- snapshot at revision 27debd60a152d39e421c57bce511f16d8439a670
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

// Lane specification parsed after a vector register: none, all lanes ("[]"),
// or a single indexed lane ("[n]").
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. It parses
/// instructions and directives, tracks Thumb IT-block state, and matches
/// parsed operands against the tablegen'd instruction descriptions.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // State for the Thumb-2 IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // An IT block is active iff CurPosition has been set to a real position.
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics are forwarded to the generic parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Directive handlers (.word, .thumb, .arm, .thumb_func, .code, .syntax).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into its base, predication code,
  // carry-set flag, processor-mode modifier, and IT mask suffix.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode and recompute available features.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked by the generated matcher.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match hooks: validate a matched instruction (e.g. IT-block rules)
  // and rewrite pseudo forms before emission.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match failure codes, continuing the generic enumeration.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the anonymous union below.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  SmallVector<unsigned, 8> Registers;

  // Payload for each Kind; exactly one member is live, selected by Kind.
  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
      unsigned LaneIndex;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;            // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType;       // Shift type for OffsetReg
      unsigned ShiftImm;                // shift for OffsetReg.
      unsigned Alignment;               // 0 = no alignment specified
                                        // n = alignment in bytes (8, 16, or 32)
      unsigned isNegative : 1;          // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  // Private: instances are created via the static CreateXxx factories.
  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor: copies only the union member selected by Kind, since
  // copying an inactive union member would be undefined.
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Typed accessors. Each asserts that the discriminator matches before
  // reading the corresponding union member.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Kind predicates used by the generated matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }

  // Immediate-range predicates. Each requires a constant expression (a
  // non-constant MCExpr fails unless noted) and checks the encodable range.
  bool isImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Multiple of 4 in [-1020, 1020].
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_1() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 2;
  }
  bool isImm0_3() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4;
  }
  bool isImm0_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm0_63() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 64;
  }
  // Exact-value immediates (e.g. shift widths that encode a fixed size).
  bool isImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 8;
  }
  bool isImm16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 16;
  }
  bool isImm32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 32;
  }
  // Shift-right immediates: range (0, N].
  bool isShrImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 8;
  }
  bool isShrImm16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 16;
  }
  bool isShrImm32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isShrImm64() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 64;
  }
  bool isImm1_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 8;
  }
  bool isImm1_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 16;
  }
  bool isImm1_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  // Modified-immediate predicates: value (or its complement/negation) must be
  // encodable as an ARM or Thumb-2 shifter-operand immediate.
  bool isARMSOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isARMSOImmNot() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }
  bool isARMSOImmNeg() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(-Value) != -1;
  }
  bool isT2SOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isT2SOImmNot() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(~Value) != -1;
  }
  bool isT2SOImmNeg() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(-Value) != -1;
  }
  bool isSetEndImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
  }
  // Memory/addressing-mode predicates: classify a k_Memory operand by which
  // addressing mode encoding can represent it.
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (Kind != k_Immediate)
      return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset.
No shifts, negations or any other complicating factors. 946 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 947 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 948 return false; 949 return isARMLowRegister(Memory.BaseRegNum) && 950 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 951 } 952 bool isMemThumbRIs4() const { 953 if (!isMemory() || Memory.OffsetRegNum != 0 || 954 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 955 return false; 956 // Immediate offset, multiple of 4 in range [0, 124]. 957 if (!Memory.OffsetImm) return true; 958 int64_t Val = Memory.OffsetImm->getValue(); 959 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 960 } 961 bool isMemThumbRIs2() const { 962 if (!isMemory() || Memory.OffsetRegNum != 0 || 963 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 964 return false; 965 // Immediate offset, multiple of 4 in range [0, 62]. 966 if (!Memory.OffsetImm) return true; 967 int64_t Val = Memory.OffsetImm->getValue(); 968 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 969 } 970 bool isMemThumbRIs1() const { 971 if (!isMemory() || Memory.OffsetRegNum != 0 || 972 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 973 return false; 974 // Immediate offset in range [0, 31]. 975 if (!Memory.OffsetImm) return true; 976 int64_t Val = Memory.OffsetImm->getValue(); 977 return Val >= 0 && Val <= 31; 978 } 979 bool isMemThumbSPI() const { 980 if (!isMemory() || Memory.OffsetRegNum != 0 || 981 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 982 return false; 983 // Immediate offset, multiple of 4 in range [0, 1020]. 984 if (!Memory.OffsetImm) return true; 985 int64_t Val = Memory.OffsetImm->getValue(); 986 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 987 } 988 bool isMemImm8s4Offset() const { 989 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 990 return false; 991 // Immediate offset a multiple of 4 in range [-1020, 1020]. 
992 if (!Memory.OffsetImm) return true; 993 int64_t Val = Memory.OffsetImm->getValue(); 994 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 995 } 996 bool isMemImm0_1020s4Offset() const { 997 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 998 return false; 999 // Immediate offset a multiple of 4 in range [0, 1020]. 1000 if (!Memory.OffsetImm) return true; 1001 int64_t Val = Memory.OffsetImm->getValue(); 1002 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 1003 } 1004 bool isMemImm8Offset() const { 1005 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1006 return false; 1007 // Immediate offset in range [-255, 255]. 1008 if (!Memory.OffsetImm) return true; 1009 int64_t Val = Memory.OffsetImm->getValue(); 1010 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 1011 } 1012 bool isMemPosImm8Offset() const { 1013 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1014 return false; 1015 // Immediate offset in range [0, 255]. 1016 if (!Memory.OffsetImm) return true; 1017 int64_t Val = Memory.OffsetImm->getValue(); 1018 return Val >= 0 && Val < 256; 1019 } 1020 bool isMemNegImm8Offset() const { 1021 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1022 return false; 1023 // Immediate offset in range [-255, -1]. 1024 if (!Memory.OffsetImm) return false; 1025 int64_t Val = Memory.OffsetImm->getValue(); 1026 return (Val == INT32_MIN) || (Val > -256 && Val < 0); 1027 } 1028 bool isMemUImm12Offset() const { 1029 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1030 return false; 1031 // Immediate offset in range [0, 4095]. 1032 if (!Memory.OffsetImm) return true; 1033 int64_t Val = Memory.OffsetImm->getValue(); 1034 return (Val >= 0 && Val < 4096); 1035 } 1036 bool isMemImm12Offset() const { 1037 // If we have an immediate that's not a constant, treat it as a label 1038 // reference needing a fixup. 
If it is a constant, it's something else 1039 // and we reject it. 1040 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 1041 return true; 1042 1043 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1044 return false; 1045 // Immediate offset in range [-4095, 4095]. 1046 if (!Memory.OffsetImm) return true; 1047 int64_t Val = Memory.OffsetImm->getValue(); 1048 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 1049 } 1050 bool isPostIdxImm8() const { 1051 if (Kind != k_Immediate) 1052 return false; 1053 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1054 if (!CE) return false; 1055 int64_t Val = CE->getValue(); 1056 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 1057 } 1058 bool isPostIdxImm8s4() const { 1059 if (Kind != k_Immediate) 1060 return false; 1061 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1062 if (!CE) return false; 1063 int64_t Val = CE->getValue(); 1064 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 1065 (Val == INT32_MIN); 1066 } 1067 1068 bool isMSRMask() const { return Kind == k_MSRMask; } 1069 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 1070 1071 // NEON operands. 1072 bool isVecListOneD() const { 1073 if (Kind != k_VectorList) return false; 1074 return VectorList.Count == 1; 1075 } 1076 1077 bool isVecListTwoD() const { 1078 if (Kind != k_VectorList) return false; 1079 return VectorList.Count == 2; 1080 } 1081 1082 bool isVecListThreeD() const { 1083 if (Kind != k_VectorList) return false; 1084 return VectorList.Count == 3; 1085 } 1086 1087 bool isVecListFourD() const { 1088 if (Kind != k_VectorList) return false; 1089 return VectorList.Count == 4; 1090 } 1091 1092 bool isVecListTwoQ() const { 1093 if (Kind != k_VectorList) return false; 1094 //FIXME: We haven't taught the parser to handle by-two register lists 1095 // yet, so don't pretend to know one. 
1096 return VectorList.Count == 2 && false; 1097 } 1098 1099 bool isVecListOneDAllLanes() const { 1100 if (Kind != k_VectorListAllLanes) return false; 1101 return VectorList.Count == 1; 1102 } 1103 1104 bool isVecListTwoDAllLanes() const { 1105 if (Kind != k_VectorListAllLanes) return false; 1106 return VectorList.Count == 2; 1107 } 1108 1109 bool isVecListOneDByteIndexed() const { 1110 if (Kind != k_VectorListIndexed) return false; 1111 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 1112 } 1113 1114 bool isVectorIndex8() const { 1115 if (Kind != k_VectorIndex) return false; 1116 return VectorIndex.Val < 8; 1117 } 1118 bool isVectorIndex16() const { 1119 if (Kind != k_VectorIndex) return false; 1120 return VectorIndex.Val < 4; 1121 } 1122 bool isVectorIndex32() const { 1123 if (Kind != k_VectorIndex) return false; 1124 return VectorIndex.Val < 2; 1125 } 1126 1127 bool isNEONi8splat() const { 1128 if (Kind != k_Immediate) 1129 return false; 1130 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1131 // Must be a constant. 1132 if (!CE) return false; 1133 int64_t Value = CE->getValue(); 1134 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 1135 // value. 1136 return Value >= 0 && Value < 256; 1137 } 1138 1139 bool isNEONi16splat() const { 1140 if (Kind != k_Immediate) 1141 return false; 1142 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1143 // Must be a constant. 1144 if (!CE) return false; 1145 int64_t Value = CE->getValue(); 1146 // i16 value in the range [0,255] or [0x0100, 0xff00] 1147 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1148 } 1149 1150 bool isNEONi32splat() const { 1151 if (Kind != k_Immediate) 1152 return false; 1153 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1154 // Must be a constant. 1155 if (!CE) return false; 1156 int64_t Value = CE->getValue(); 1157 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 
1158 return (Value >= 0 && Value < 256) || 1159 (Value >= 0x0100 && Value <= 0xff00) || 1160 (Value >= 0x010000 && Value <= 0xff0000) || 1161 (Value >= 0x01000000 && Value <= 0xff000000); 1162 } 1163 1164 bool isNEONi32vmov() const { 1165 if (Kind != k_Immediate) 1166 return false; 1167 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1168 // Must be a constant. 1169 if (!CE) return false; 1170 int64_t Value = CE->getValue(); 1171 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1172 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1173 return (Value >= 0 && Value < 256) || 1174 (Value >= 0x0100 && Value <= 0xff00) || 1175 (Value >= 0x010000 && Value <= 0xff0000) || 1176 (Value >= 0x01000000 && Value <= 0xff000000) || 1177 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1178 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1179 } 1180 1181 bool isNEONi64splat() const { 1182 if (Kind != k_Immediate) 1183 return false; 1184 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1185 // Must be a constant. 1186 if (!CE) return false; 1187 uint64_t Value = CE->getValue(); 1188 // i64 value with each byte being either 0 or 0xff. 1189 for (unsigned i = 0; i < 8; ++i) 1190 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1191 return true; 1192 } 1193 1194 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1195 // Add as immediates when possible. Null MCExpr = 0. 
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  // Condition code plus the implicit CPSR use (0 when unconditional).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register shifted by register: Rm, Rs, and the packed shift opcode.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: Rm and the packed shift opcode.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  // Bit 5 selects ASR vs LSL; low bits carry the shift amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  // A register list becomes one register operand per list entry.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  // AM2: base reg, offset reg (0 if none), packed AM2 opcode immediate.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // AM3: base reg, offset reg (0 if none), packed AM3 opcode immediate.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Thumb RI offsets are stored pre-scaled (divided by 4/2/1).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Magnitude in the low 8 bits, add/sub flag in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Values >= 256 live in the high byte; shift down and tag accordingly.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Tag selects which byte of the i32 holds the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
1753 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1754 unsigned Value = CE->getValue(); 1755 if (Value >= 256 && Value <= 0xffff) 1756 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 1757 else if (Value > 0xffff && Value <= 0xffffff) 1758 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1759 else if (Value > 0xffffff) 1760 Value = (Value >> 24) | 0x600; 1761 Inst.addOperand(MCOperand::CreateImm(Value)); 1762 } 1763 1764 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 1765 assert(N == 1 && "Invalid number of operands!"); 1766 // The immediate encodes the type of constant as well as the value. 1767 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1768 uint64_t Value = CE->getValue(); 1769 unsigned Imm = 0; 1770 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 1771 Imm |= (Value & 1) << i; 1772 } 1773 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 1774 } 1775 1776 virtual void print(raw_ostream &OS) const; 1777 1778 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1779 ARMOperand *Op = new ARMOperand(k_ITCondMask); 1780 Op->ITMask.Mask = Mask; 1781 Op->StartLoc = S; 1782 Op->EndLoc = S; 1783 return Op; 1784 } 1785 1786 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1787 ARMOperand *Op = new ARMOperand(k_CondCode); 1788 Op->CC.Val = CC; 1789 Op->StartLoc = S; 1790 Op->EndLoc = S; 1791 return Op; 1792 } 1793 1794 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1795 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1796 Op->Cop.Val = CopVal; 1797 Op->StartLoc = S; 1798 Op->EndLoc = S; 1799 return Op; 1800 } 1801 1802 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1803 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1804 Op->Cop.Val = CopVal; 1805 Op->StartLoc = S; 1806 Op->EndLoc = S; 1807 return Op; 1808 } 1809 1810 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1811 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1812 
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Token operands reference Str's storage directly; the text is not
  // copied, so Str must outlive the operand.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Builds a register-list operand. The list kind (core/DPR/SPR) is chosen
  // from the class of the first register; the registers are then sorted and
  // their per-register source locations are dropped.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
                                              SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
                                             unsigned Index, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // NOTE(review): Ctx is unused here (and in CreateFPImm below);
  // presumably kept for signature symmetry with callers -- confirm.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Memory operands carry every addressing-mode field; callers supply the
  // subset that applies to the form being built.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand
  *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

// Debug dump of an operand; the output format is per operand kind.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // NOTE(review): MaskStr has 15 entries (indices 0-14), but the assert
    // below also admits Mask == 15 -- verify the mask invariant excludes it.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset/shift/alignment are not.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

// Generic-parser entry point: returns true (failure) if no register can be
// parsed; on success RegNo holds the register number.
// NOTE(review): StartLoc/EndLoc are never written here -- confirm callers
// do not rely on them being set.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Match against the tablegen'erated canonical names first, then fall
  // back to the alias table below.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.
  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // "asl" is accepted as a gas-style alias for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this yields 1 ("recoverable")
    // even though the shift-operator token has already been consumed.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // rrx always takes the shifted-immediate form even though ShiftReg was
  // set above; the shift type alone identifies it.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2315bool ARMAsmParser:: 2316tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2317 SMLoc S = Parser.getTok().getLoc(); 2318 int RegNo = tryParseRegister(); 2319 if (RegNo == -1) 2320 return true; 2321 2322 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2323 2324 const AsmToken &ExclaimTok = Parser.getTok(); 2325 if (ExclaimTok.is(AsmToken::Exclaim)) { 2326 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2327 ExclaimTok.getLoc())); 2328 Parser.Lex(); // Eat exclaim token 2329 return false; 2330 } 2331 2332 // Also check for an index operand. This is only legal for vector registers, 2333 // but that'll get caught OK in operand matching, so we don't need to 2334 // explicitly filter everything else out here. 2335 if (Parser.getTok().is(AsmToken::LBrac)) { 2336 SMLoc SIdx = Parser.getTok().getLoc(); 2337 Parser.Lex(); // Eat left bracket token. 2338 2339 const MCExpr *ImmVal; 2340 if (getParser().ParseExpression(ImmVal)) 2341 return MatchOperand_ParseFail; 2342 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2343 if (!MCE) { 2344 TokError("immediate value expected for vector index"); 2345 return MatchOperand_ParseFail; 2346 } 2347 2348 SMLoc E = Parser.getTok().getLoc(); 2349 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2350 Error(E, "']' expected"); 2351 return MatchOperand_ParseFail; 2352 } 2353 2354 Parser.Lex(); // Eat right bracket token. 2355 2356 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2357 SIdx, E, 2358 getContext())); 2359 } 2360 2361 return false; 2362} 2363 2364/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2365/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2366/// "c5", ... 2367static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2368 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2369 // but efficient. 
  switch (Name.size()) {
  default: break;
  case 2:
    // "pN"/"cN" for N in 0-9.
    if (Name[0] != CoprocOp)
      return -1;
    switch (Name[1]) {
    default:  return -1;
    case '0': return 0;
    case '1': return 1;
    case '2': return 2;
    case '3': return 3;
    case '4': return 4;
    case '5': return 5;
    case '6': return 6;
    case '7': return 7;
    case '8': return 8;
    case '9': return 9;
    }
    break;
  case 3:
    // "p1N"/"c1N" for N in 0-5 (i.e. 10-15).
    if (Name[0] != CoprocOp || Name[1] != '1')
      return -1;
    switch (Name[2]) {
    default:  return -1;
    case '0': return 10;
    case '1': return 11;
    case '2': return 12;
    case '3': return 13;
    case '4': return 14;
    case '5': return 15;
    }
    break;
  }

  return -1;
}

/// parseITCondCode - Try to parse a condition code for an IT instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  // "cs"/"hs" and "cc"/"lo" are alternate mnemonics for the same condition.
  unsigned CC = StringSwitch<unsigned>(Tok.getString())
    .Case("eq", ARMCC::EQ)
    .Case("ne", ARMCC::NE)
    .Case("hs", ARMCC::HS)
    .Case("cs", ARMCC::HS)
    .Case("lo", ARMCC::LO)
    .Case("cc", ARMCC::LO)
    .Case("mi", ARMCC::MI)
    .Case("pl", ARMCC::PL)
    .Case("vs", ARMCC::VS)
    .Case("vc", ARMCC::VC)
    .Case("hi", ARMCC::HI)
    .Case("ls", ARMCC::LS)
    .Case("ge", ARMCC::GE)
    .Case("lt", ARMCC::LT)
    .Case("gt", ARMCC::GT)
    .Case("le", ARMCC::LE)
    .Case("al", ARMCC::AL)
    .Default(~0U);
  if (CC == ~0U)
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the token.

  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));

  return MatchOperand_Success;
}

/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}

/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}

/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
2487 if (Parser.getTok().isNot(AsmToken::LCurly)) 2488 return MatchOperand_NoMatch; 2489 Parser.Lex(); // Eat the '{' 2490 2491 const MCExpr *Expr; 2492 SMLoc Loc = Parser.getTok().getLoc(); 2493 if (getParser().ParseExpression(Expr)) { 2494 Error(Loc, "illegal expression"); 2495 return MatchOperand_ParseFail; 2496 } 2497 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2498 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2499 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2500 return MatchOperand_ParseFail; 2501 } 2502 int Val = CE->getValue(); 2503 2504 // Check for and consume the closing '}' 2505 if (Parser.getTok().isNot(AsmToken::RCurly)) 2506 return MatchOperand_ParseFail; 2507 SMLoc E = Parser.getTok().getLoc(); 2508 Parser.Lex(); // Eat the '}' 2509 2510 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2511 return MatchOperand_Success; 2512} 2513 2514// For register list parsing, we need to map from raw GPR register numbering 2515// to the enumeration values. The enumeration values aren't sorted by 2516// register number due to our using "sp", "lr" and "pc" as canonical names. 2517static unsigned getNextRegister(unsigned Reg) { 2518 // If this is a GPR, we need to do it manually, otherwise we can rely 2519 // on the sort ordering of the enumeration since the other reg-classes 2520 // are sane. 
2521 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2522 return Reg + 1; 2523 switch(Reg) { 2524 default: assert(0 && "Invalid GPR number!"); 2525 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2526 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2527 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2528 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2529 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2530 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2531 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2532 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2533 } 2534} 2535 2536// Return the low-subreg of a given Q register. 2537static unsigned getDRegFromQReg(unsigned QReg) { 2538 switch (QReg) { 2539 default: llvm_unreachable("expected a Q register!"); 2540 case ARM::Q0: return ARM::D0; 2541 case ARM::Q1: return ARM::D2; 2542 case ARM::Q2: return ARM::D4; 2543 case ARM::Q3: return ARM::D6; 2544 case ARM::Q4: return ARM::D8; 2545 case ARM::Q5: return ARM::D10; 2546 case ARM::Q6: return ARM::D12; 2547 case ARM::Q7: return ARM::D14; 2548 case ARM::Q8: return ARM::D16; 2549 case ARM::Q9: return ARM::D18; 2550 case ARM::Q10: return ARM::D20; 2551 case ARM::Q11: return ARM::D22; 2552 case ARM::Q12: return ARM::D24; 2553 case ARM::Q13: return ARM::D26; 2554 case ARM::Q14: return ARM::D28; 2555 case ARM::Q15: return ARM::D30; 2556 } 2557} 2558 2559/// Parse a register list. 2560bool ARMAsmParser:: 2561parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2562 assert(Parser.getTok().is(AsmToken::LCurly) && 2563 "Token is not a Left Curly Brace"); 2564 SMLoc S = Parser.getTok().getLoc(); 2565 Parser.Lex(); // Eat '{' token. 2566 SMLoc RegLoc = Parser.getTok().getLoc(); 2567 2568 // Check the first register in the list to see what register class 2569 // this is a list of. 
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // The first register's class fixes the class for the whole list.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // Duplicates get a warning, not an error, and are simply skipped.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
2656 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2657 Reg != OldReg + 1) 2658 return Error(RegLoc, "non-contiguous register range"); 2659 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2660 if (isQReg) 2661 Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc)); 2662 } 2663 2664 SMLoc E = Parser.getTok().getLoc(); 2665 if (Parser.getTok().isNot(AsmToken::RCurly)) 2666 return Error(E, "'}' expected"); 2667 Parser.Lex(); // Eat '}' token. 2668 2669 // Push the register list operand. 2670 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2671 2672 // The ARM system instruction variants for LDM/STM have a '^' token here. 2673 if (Parser.getTok().is(AsmToken::Caret)) { 2674 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc())); 2675 Parser.Lex(); // Eat '^' token. 2676 } 2677 2678 return false; 2679} 2680 2681// Helper function to parse the lane index for vector lists. 2682ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2683parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) { 2684 Index = 0; // Always return a defined index value. 2685 if (Parser.getTok().is(AsmToken::LBrac)) { 2686 Parser.Lex(); // Eat the '['. 2687 if (Parser.getTok().is(AsmToken::RBrac)) { 2688 // "Dn[]" is the 'all lanes' syntax. 2689 LaneKind = AllLanes; 2690 Parser.Lex(); // Eat the ']'. 2691 return MatchOperand_Success; 2692 } 2693 if (Parser.getTok().is(AsmToken::Integer)) { 2694 int64_t Val = Parser.getTok().getIntVal(); 2695 // Make this range check context sensitive for .8, .16, .32. 2696 if (Val < 0 && Val > 7) 2697 Error(Parser.getTok().getLoc(), "lane index out of range"); 2698 Index = Val; 2699 LaneKind = IndexedLane; 2700 Parser.Lex(); // Eat the token; 2701 if (Parser.getTok().isNot(AsmToken::RBrac)) 2702 Error(Parser.getTok().getLoc(), "']' expected"); 2703 Parser.Lex(); // Eat the ']'. 
2704 return MatchOperand_Success; 2705 } 2706 Error(Parser.getTok().getLoc(), "lane index must be empty or an integer"); 2707 return MatchOperand_ParseFail; 2708 } 2709 LaneKind = NoLanes; 2710 return MatchOperand_Success; 2711} 2712 2713// parse a vector register list 2714ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2715parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2716 VectorLaneTy LaneKind; 2717 unsigned LaneIndex; 2718 SMLoc S = Parser.getTok().getLoc(); 2719 // As an extension (to match gas), support a plain D register or Q register 2720 // (without encosing curly braces) as a single or double entry list, 2721 // respectively. 2722 if (Parser.getTok().is(AsmToken::Identifier)) { 2723 int Reg = tryParseRegister(); 2724 if (Reg == -1) 2725 return MatchOperand_NoMatch; 2726 SMLoc E = Parser.getTok().getLoc(); 2727 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) { 2728 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2729 if (Res != MatchOperand_Success) 2730 return Res; 2731 switch (LaneKind) { 2732 default: 2733 assert(0 && "unexpected lane kind!"); 2734 case NoLanes: 2735 E = Parser.getTok().getLoc(); 2736 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E)); 2737 break; 2738 case AllLanes: 2739 E = Parser.getTok().getLoc(); 2740 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E)); 2741 break; 2742 case IndexedLane: 2743 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1, 2744 LaneIndex, S,E)); 2745 break; 2746 } 2747 return MatchOperand_Success; 2748 } 2749 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2750 Reg = getDRegFromQReg(Reg); 2751 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2752 if (Res != MatchOperand_Success) 2753 return Res; 2754 switch (LaneKind) { 2755 default: 2756 assert(0 && "unexpected lane kind!"); 2757 case NoLanes: 2758 E = Parser.getTok().getLoc(); 2759 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, 
E)); 2760 break; 2761 case AllLanes: 2762 E = Parser.getTok().getLoc(); 2763 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E)); 2764 break; 2765 case IndexedLane: 2766 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2, 2767 LaneIndex, S,E)); 2768 break; 2769 } 2770 return MatchOperand_Success; 2771 } 2772 Error(S, "vector register expected"); 2773 return MatchOperand_ParseFail; 2774 } 2775 2776 if (Parser.getTok().isNot(AsmToken::LCurly)) 2777 return MatchOperand_NoMatch; 2778 2779 Parser.Lex(); // Eat '{' token. 2780 SMLoc RegLoc = Parser.getTok().getLoc(); 2781 2782 int Reg = tryParseRegister(); 2783 if (Reg == -1) { 2784 Error(RegLoc, "register expected"); 2785 return MatchOperand_ParseFail; 2786 } 2787 unsigned Count = 1; 2788 unsigned FirstReg = Reg; 2789 // The list is of D registers, but we also allow Q regs and just interpret 2790 // them as the two D sub-registers. 2791 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2792 FirstReg = Reg = getDRegFromQReg(Reg); 2793 ++Reg; 2794 ++Count; 2795 } 2796 if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success) 2797 return MatchOperand_ParseFail; 2798 2799 while (Parser.getTok().is(AsmToken::Comma) || 2800 Parser.getTok().is(AsmToken::Minus)) { 2801 if (Parser.getTok().is(AsmToken::Minus)) { 2802 Parser.Lex(); // Eat the minus. 2803 SMLoc EndLoc = Parser.getTok().getLoc(); 2804 int EndReg = tryParseRegister(); 2805 if (EndReg == -1) { 2806 Error(EndLoc, "register expected"); 2807 return MatchOperand_ParseFail; 2808 } 2809 // Allow Q regs and just interpret them as the two D sub-registers. 2810 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2811 EndReg = getDRegFromQReg(EndReg) + 1; 2812 // If the register is the same as the start reg, there's nothing 2813 // more to do. 2814 if (Reg == EndReg) 2815 continue; 2816 // The register must be in the same register class as the first. 
2817 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) { 2818 Error(EndLoc, "invalid register in register list"); 2819 return MatchOperand_ParseFail; 2820 } 2821 // Ranges must go from low to high. 2822 if (Reg > EndReg) { 2823 Error(EndLoc, "bad range in register list"); 2824 return MatchOperand_ParseFail; 2825 } 2826 // Parse the lane specifier if present. 2827 VectorLaneTy NextLaneKind; 2828 unsigned NextLaneIndex; 2829 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2830 return MatchOperand_ParseFail; 2831 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2832 Error(EndLoc, "mismatched lane index in register list"); 2833 return MatchOperand_ParseFail; 2834 } 2835 EndLoc = Parser.getTok().getLoc(); 2836 2837 // Add all the registers in the range to the register list. 2838 Count += EndReg - Reg; 2839 Reg = EndReg; 2840 continue; 2841 } 2842 Parser.Lex(); // Eat the comma. 2843 RegLoc = Parser.getTok().getLoc(); 2844 int OldReg = Reg; 2845 Reg = tryParseRegister(); 2846 if (Reg == -1) { 2847 Error(RegLoc, "register expected"); 2848 return MatchOperand_ParseFail; 2849 } 2850 // vector register lists must be contiguous. 2851 // It's OK to use the enumeration values directly here rather, as the 2852 // VFP register classes have the enum sorted properly. 2853 // 2854 // The list is of D registers, but we also allow Q regs and just interpret 2855 // them as the two D sub-registers. 2856 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2857 Reg = getDRegFromQReg(Reg); 2858 if (Reg != OldReg + 1) { 2859 Error(RegLoc, "non-contiguous register range"); 2860 return MatchOperand_ParseFail; 2861 } 2862 ++Reg; 2863 Count += 2; 2864 // Parse the lane specifier if present. 
2865 VectorLaneTy NextLaneKind; 2866 unsigned NextLaneIndex; 2867 SMLoc EndLoc = Parser.getTok().getLoc(); 2868 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2869 return MatchOperand_ParseFail; 2870 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2871 Error(EndLoc, "mismatched lane index in register list"); 2872 return MatchOperand_ParseFail; 2873 } 2874 continue; 2875 } 2876 // Normal D register. Just check that it's contiguous and keep going. 2877 if (Reg != OldReg + 1) { 2878 Error(RegLoc, "non-contiguous register range"); 2879 return MatchOperand_ParseFail; 2880 } 2881 ++Count; 2882 // Parse the lane specifier if present. 2883 VectorLaneTy NextLaneKind; 2884 unsigned NextLaneIndex; 2885 SMLoc EndLoc = Parser.getTok().getLoc(); 2886 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2887 return MatchOperand_ParseFail; 2888 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2889 Error(EndLoc, "mismatched lane index in register list"); 2890 return MatchOperand_ParseFail; 2891 } 2892 } 2893 2894 SMLoc E = Parser.getTok().getLoc(); 2895 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2896 Error(E, "'}' expected"); 2897 return MatchOperand_ParseFail; 2898 } 2899 Parser.Lex(); // Eat '}' token. 2900 2901 switch (LaneKind) { 2902 default: 2903 assert(0 && "unexpected lane kind in register list."); 2904 case NoLanes: 2905 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2906 break; 2907 case AllLanes: 2908 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count, 2909 S, E)); 2910 break; 2911 case IndexedLane: 2912 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count, 2913 LaneIndex, S, E)); 2914 break; 2915 } 2916 return MatchOperand_Success; 2917} 2918 2919/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 
2920ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2921parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2922 SMLoc S = Parser.getTok().getLoc(); 2923 const AsmToken &Tok = Parser.getTok(); 2924 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2925 StringRef OptStr = Tok.getString(); 2926 2927 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2928 .Case("sy", ARM_MB::SY) 2929 .Case("st", ARM_MB::ST) 2930 .Case("sh", ARM_MB::ISH) 2931 .Case("ish", ARM_MB::ISH) 2932 .Case("shst", ARM_MB::ISHST) 2933 .Case("ishst", ARM_MB::ISHST) 2934 .Case("nsh", ARM_MB::NSH) 2935 .Case("un", ARM_MB::NSH) 2936 .Case("nshst", ARM_MB::NSHST) 2937 .Case("unst", ARM_MB::NSHST) 2938 .Case("osh", ARM_MB::OSH) 2939 .Case("oshst", ARM_MB::OSHST) 2940 .Default(~0U); 2941 2942 if (Opt == ~0U) 2943 return MatchOperand_NoMatch; 2944 2945 Parser.Lex(); // Eat identifier token. 2946 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2947 return MatchOperand_Success; 2948} 2949 2950/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2951ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2952parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2953 SMLoc S = Parser.getTok().getLoc(); 2954 const AsmToken &Tok = Parser.getTok(); 2955 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2956 StringRef IFlagsStr = Tok.getString(); 2957 2958 // An iflags string of "none" is interpreted to mean that none of the AIF 2959 // bits are set. Not a terribly useful instruction, but a valid encoding. 
2960 unsigned IFlags = 0; 2961 if (IFlagsStr != "none") { 2962 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2963 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2964 .Case("a", ARM_PROC::A) 2965 .Case("i", ARM_PROC::I) 2966 .Case("f", ARM_PROC::F) 2967 .Default(~0U); 2968 2969 // If some specific iflag is already set, it means that some letter is 2970 // present more than once, this is not acceptable. 2971 if (Flag == ~0U || (IFlags & Flag)) 2972 return MatchOperand_NoMatch; 2973 2974 IFlags |= Flag; 2975 } 2976 } 2977 2978 Parser.Lex(); // Eat identifier token. 2979 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2980 return MatchOperand_Success; 2981} 2982 2983/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 2984ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2985parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2986 SMLoc S = Parser.getTok().getLoc(); 2987 const AsmToken &Tok = Parser.getTok(); 2988 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2989 StringRef Mask = Tok.getString(); 2990 2991 if (isMClass()) { 2992 // See ARMv6-M 10.1.1 2993 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2994 .Case("apsr", 0) 2995 .Case("iapsr", 1) 2996 .Case("eapsr", 2) 2997 .Case("xpsr", 3) 2998 .Case("ipsr", 5) 2999 .Case("epsr", 6) 3000 .Case("iepsr", 7) 3001 .Case("msp", 8) 3002 .Case("psp", 9) 3003 .Case("primask", 16) 3004 .Case("basepri", 17) 3005 .Case("basepri_max", 18) 3006 .Case("faultmask", 19) 3007 .Case("control", 20) 3008 .Default(~0U); 3009 3010 if (FlagsVal == ~0U) 3011 return MatchOperand_NoMatch; 3012 3013 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 3014 // basepri, basepri_max and faultmask only valid for V7m. 3015 return MatchOperand_NoMatch; 3016 3017 Parser.Lex(); // Eat identifier token. 
3018 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3019 return MatchOperand_Success; 3020 } 3021 3022 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 3023 size_t Start = 0, Next = Mask.find('_'); 3024 StringRef Flags = ""; 3025 std::string SpecReg = Mask.slice(Start, Next).lower(); 3026 if (Next != StringRef::npos) 3027 Flags = Mask.slice(Next+1, Mask.size()); 3028 3029 // FlagsVal contains the complete mask: 3030 // 3-0: Mask 3031 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3032 unsigned FlagsVal = 0; 3033 3034 if (SpecReg == "apsr") { 3035 FlagsVal = StringSwitch<unsigned>(Flags) 3036 .Case("nzcvq", 0x8) // same as CPSR_f 3037 .Case("g", 0x4) // same as CPSR_s 3038 .Case("nzcvqg", 0xc) // same as CPSR_fs 3039 .Default(~0U); 3040 3041 if (FlagsVal == ~0U) { 3042 if (!Flags.empty()) 3043 return MatchOperand_NoMatch; 3044 else 3045 FlagsVal = 8; // No flag 3046 } 3047 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 3048 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 3049 Flags = "fc"; 3050 for (int i = 0, e = Flags.size(); i != e; ++i) { 3051 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 3052 .Case("c", 1) 3053 .Case("x", 2) 3054 .Case("s", 4) 3055 .Case("f", 8) 3056 .Default(~0U); 3057 3058 // If some specific flag is already set, it means that some letter is 3059 // present more than once, this is not acceptable. 3060 if (FlagsVal == ~0U || (FlagsVal & Flag)) 3061 return MatchOperand_NoMatch; 3062 FlagsVal |= Flag; 3063 } 3064 } else // No match for special register. 3065 return MatchOperand_NoMatch; 3066 3067 // Special register without flags is NOT equivalent to "fc" flags. 3068 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 3069 // two lines would enable gas compatibility at the expense of breaking 3070 // round-tripping. 
3071 // 3072 // if (!FlagsVal) 3073 // FlagsVal = 0x9; 3074 3075 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3076 if (SpecReg == "spsr") 3077 FlagsVal |= 16; 3078 3079 Parser.Lex(); // Eat identifier token. 3080 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3081 return MatchOperand_Success; 3082} 3083 3084ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3085parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 3086 int Low, int High) { 3087 const AsmToken &Tok = Parser.getTok(); 3088 if (Tok.isNot(AsmToken::Identifier)) { 3089 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3090 return MatchOperand_ParseFail; 3091 } 3092 StringRef ShiftName = Tok.getString(); 3093 std::string LowerOp = Op.lower(); 3094 std::string UpperOp = Op.upper(); 3095 if (ShiftName != LowerOp && ShiftName != UpperOp) { 3096 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3097 return MatchOperand_ParseFail; 3098 } 3099 Parser.Lex(); // Eat shift type token. 3100 3101 // There must be a '#' and a shift amount. 3102 if (Parser.getTok().isNot(AsmToken::Hash) && 3103 Parser.getTok().isNot(AsmToken::Dollar)) { 3104 Error(Parser.getTok().getLoc(), "'#' expected"); 3105 return MatchOperand_ParseFail; 3106 } 3107 Parser.Lex(); // Eat hash token. 
3108 3109 const MCExpr *ShiftAmount; 3110 SMLoc Loc = Parser.getTok().getLoc(); 3111 if (getParser().ParseExpression(ShiftAmount)) { 3112 Error(Loc, "illegal expression"); 3113 return MatchOperand_ParseFail; 3114 } 3115 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3116 if (!CE) { 3117 Error(Loc, "constant expression expected"); 3118 return MatchOperand_ParseFail; 3119 } 3120 int Val = CE->getValue(); 3121 if (Val < Low || Val > High) { 3122 Error(Loc, "immediate value out of range"); 3123 return MatchOperand_ParseFail; 3124 } 3125 3126 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 3127 3128 return MatchOperand_Success; 3129} 3130 3131ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3132parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3133 const AsmToken &Tok = Parser.getTok(); 3134 SMLoc S = Tok.getLoc(); 3135 if (Tok.isNot(AsmToken::Identifier)) { 3136 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3137 return MatchOperand_ParseFail; 3138 } 3139 int Val = StringSwitch<int>(Tok.getString()) 3140 .Case("be", 1) 3141 .Case("le", 0) 3142 .Default(-1); 3143 Parser.Lex(); // Eat the token. 3144 3145 if (Val == -1) { 3146 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3147 return MatchOperand_ParseFail; 3148 } 3149 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3150 getContext()), 3151 S, Parser.getTok().getLoc())); 3152 return MatchOperand_Success; 3153} 3154 3155/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3156/// instructions. Legal values are: 3157/// lsl #n 'n' in [0,31] 3158/// asr #n 'n' in [1,32] 3159/// n == 32 encoded as n == 0. 
3160ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3161parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3162 const AsmToken &Tok = Parser.getTok(); 3163 SMLoc S = Tok.getLoc(); 3164 if (Tok.isNot(AsmToken::Identifier)) { 3165 Error(S, "shift operator 'asr' or 'lsl' expected"); 3166 return MatchOperand_ParseFail; 3167 } 3168 StringRef ShiftName = Tok.getString(); 3169 bool isASR; 3170 if (ShiftName == "lsl" || ShiftName == "LSL") 3171 isASR = false; 3172 else if (ShiftName == "asr" || ShiftName == "ASR") 3173 isASR = true; 3174 else { 3175 Error(S, "shift operator 'asr' or 'lsl' expected"); 3176 return MatchOperand_ParseFail; 3177 } 3178 Parser.Lex(); // Eat the operator. 3179 3180 // A '#' and a shift amount. 3181 if (Parser.getTok().isNot(AsmToken::Hash) && 3182 Parser.getTok().isNot(AsmToken::Dollar)) { 3183 Error(Parser.getTok().getLoc(), "'#' expected"); 3184 return MatchOperand_ParseFail; 3185 } 3186 Parser.Lex(); // Eat hash token. 3187 3188 const MCExpr *ShiftAmount; 3189 SMLoc E = Parser.getTok().getLoc(); 3190 if (getParser().ParseExpression(ShiftAmount)) { 3191 Error(E, "malformed shift expression"); 3192 return MatchOperand_ParseFail; 3193 } 3194 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3195 if (!CE) { 3196 Error(E, "shift amount must be an immediate"); 3197 return MatchOperand_ParseFail; 3198 } 3199 3200 int64_t Val = CE->getValue(); 3201 if (isASR) { 3202 // Shift amount must be in [1,32] 3203 if (Val < 1 || Val > 32) { 3204 Error(E, "'asr' shift amount must be in range [1,32]"); 3205 return MatchOperand_ParseFail; 3206 } 3207 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3208 if (isThumb() && Val == 32) { 3209 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 3210 return MatchOperand_ParseFail; 3211 } 3212 if (Val == 32) Val = 0; 3213 } else { 3214 // Shift amount must be in [1,32] 3215 if (Val < 0 || Val > 31) { 3216 Error(E, "'lsr' shift amount must be in range [0,31]"); 3217 return MatchOperand_ParseFail; 3218 } 3219 } 3220 3221 E = Parser.getTok().getLoc(); 3222 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 3223 3224 return MatchOperand_Success; 3225} 3226 3227/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3228/// of instructions. Legal values are: 3229/// ror #n 'n' in {0, 8, 16, 24} 3230ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3231parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3232 const AsmToken &Tok = Parser.getTok(); 3233 SMLoc S = Tok.getLoc(); 3234 if (Tok.isNot(AsmToken::Identifier)) 3235 return MatchOperand_NoMatch; 3236 StringRef ShiftName = Tok.getString(); 3237 if (ShiftName != "ror" && ShiftName != "ROR") 3238 return MatchOperand_NoMatch; 3239 Parser.Lex(); // Eat the operator. 3240 3241 // A '#' and a rotate amount. 3242 if (Parser.getTok().isNot(AsmToken::Hash) && 3243 Parser.getTok().isNot(AsmToken::Dollar)) { 3244 Error(Parser.getTok().getLoc(), "'#' expected"); 3245 return MatchOperand_ParseFail; 3246 } 3247 Parser.Lex(); // Eat hash token. 
3248 3249 const MCExpr *ShiftAmount; 3250 SMLoc E = Parser.getTok().getLoc(); 3251 if (getParser().ParseExpression(ShiftAmount)) { 3252 Error(E, "malformed rotate expression"); 3253 return MatchOperand_ParseFail; 3254 } 3255 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3256 if (!CE) { 3257 Error(E, "rotate amount must be an immediate"); 3258 return MatchOperand_ParseFail; 3259 } 3260 3261 int64_t Val = CE->getValue(); 3262 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 3263 // normally, zero is represented in asm by omitting the rotate operand 3264 // entirely. 3265 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 3266 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 3267 return MatchOperand_ParseFail; 3268 } 3269 3270 E = Parser.getTok().getLoc(); 3271 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 3272 3273 return MatchOperand_Success; 3274} 3275 3276ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3277parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3278 SMLoc S = Parser.getTok().getLoc(); 3279 // The bitfield descriptor is really two operands, the LSB and the width. 3280 if (Parser.getTok().isNot(AsmToken::Hash) && 3281 Parser.getTok().isNot(AsmToken::Dollar)) { 3282 Error(Parser.getTok().getLoc(), "'#' expected"); 3283 return MatchOperand_ParseFail; 3284 } 3285 Parser.Lex(); // Eat hash token. 
3286 3287 const MCExpr *LSBExpr; 3288 SMLoc E = Parser.getTok().getLoc(); 3289 if (getParser().ParseExpression(LSBExpr)) { 3290 Error(E, "malformed immediate expression"); 3291 return MatchOperand_ParseFail; 3292 } 3293 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 3294 if (!CE) { 3295 Error(E, "'lsb' operand must be an immediate"); 3296 return MatchOperand_ParseFail; 3297 } 3298 3299 int64_t LSB = CE->getValue(); 3300 // The LSB must be in the range [0,31] 3301 if (LSB < 0 || LSB > 31) { 3302 Error(E, "'lsb' operand must be in the range [0,31]"); 3303 return MatchOperand_ParseFail; 3304 } 3305 E = Parser.getTok().getLoc(); 3306 3307 // Expect another immediate operand. 3308 if (Parser.getTok().isNot(AsmToken::Comma)) { 3309 Error(Parser.getTok().getLoc(), "too few operands"); 3310 return MatchOperand_ParseFail; 3311 } 3312 Parser.Lex(); // Eat hash token. 3313 if (Parser.getTok().isNot(AsmToken::Hash) && 3314 Parser.getTok().isNot(AsmToken::Dollar)) { 3315 Error(Parser.getTok().getLoc(), "'#' expected"); 3316 return MatchOperand_ParseFail; 3317 } 3318 Parser.Lex(); // Eat hash token. 
3319 3320 const MCExpr *WidthExpr; 3321 if (getParser().ParseExpression(WidthExpr)) { 3322 Error(E, "malformed immediate expression"); 3323 return MatchOperand_ParseFail; 3324 } 3325 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3326 if (!CE) { 3327 Error(E, "'width' operand must be an immediate"); 3328 return MatchOperand_ParseFail; 3329 } 3330 3331 int64_t Width = CE->getValue(); 3332 // The LSB must be in the range [1,32-lsb] 3333 if (Width < 1 || Width > 32 - LSB) { 3334 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3335 return MatchOperand_ParseFail; 3336 } 3337 E = Parser.getTok().getLoc(); 3338 3339 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3340 3341 return MatchOperand_Success; 3342} 3343 3344ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3345parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3346 // Check for a post-index addressing register operand. Specifically: 3347 // postidx_reg := '+' register {, shift} 3348 // | '-' register {, shift} 3349 // | register {, shift} 3350 3351 // This method must return MatchOperand_NoMatch without consuming any tokens 3352 // in the case where there is no match, as other alternatives take other 3353 // parse methods. 3354 AsmToken Tok = Parser.getTok(); 3355 SMLoc S = Tok.getLoc(); 3356 bool haveEaten = false; 3357 bool isAdd = true; 3358 int Reg = -1; 3359 if (Tok.is(AsmToken::Plus)) { 3360 Parser.Lex(); // Eat the '+' token. 3361 haveEaten = true; 3362 } else if (Tok.is(AsmToken::Minus)) { 3363 Parser.Lex(); // Eat the '-' token. 
3364 isAdd = false; 3365 haveEaten = true; 3366 } 3367 if (Parser.getTok().is(AsmToken::Identifier)) 3368 Reg = tryParseRegister(); 3369 if (Reg == -1) { 3370 if (!haveEaten) 3371 return MatchOperand_NoMatch; 3372 Error(Parser.getTok().getLoc(), "register expected"); 3373 return MatchOperand_ParseFail; 3374 } 3375 SMLoc E = Parser.getTok().getLoc(); 3376 3377 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3378 unsigned ShiftImm = 0; 3379 if (Parser.getTok().is(AsmToken::Comma)) { 3380 Parser.Lex(); // Eat the ','. 3381 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3382 return MatchOperand_ParseFail; 3383 } 3384 3385 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3386 ShiftImm, S, E)); 3387 3388 return MatchOperand_Success; 3389} 3390 3391ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3392parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3393 // Check for a post-index addressing register operand. Specifically: 3394 // am3offset := '+' register 3395 // | '-' register 3396 // | register 3397 // | # imm 3398 // | # + imm 3399 // | # - imm 3400 3401 // This method must return MatchOperand_NoMatch without consuming any tokens 3402 // in the case where there is no match, as other alternatives take other 3403 // parse methods. 3404 AsmToken Tok = Parser.getTok(); 3405 SMLoc S = Tok.getLoc(); 3406 3407 // Do immediates first, as we always parse those if we have a '#'. 3408 if (Parser.getTok().is(AsmToken::Hash) || 3409 Parser.getTok().is(AsmToken::Dollar)) { 3410 Parser.Lex(); // Eat the '#'. 3411 // Explicitly look for a '-', as we need to encode negative zero 3412 // differently. 
3413 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3414 const MCExpr *Offset; 3415 if (getParser().ParseExpression(Offset)) 3416 return MatchOperand_ParseFail; 3417 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3418 if (!CE) { 3419 Error(S, "constant expression expected"); 3420 return MatchOperand_ParseFail; 3421 } 3422 SMLoc E = Tok.getLoc(); 3423 // Negative zero is encoded as the flag value INT32_MIN. 3424 int32_t Val = CE->getValue(); 3425 if (isNegative && Val == 0) 3426 Val = INT32_MIN; 3427 3428 Operands.push_back( 3429 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3430 3431 return MatchOperand_Success; 3432 } 3433 3434 3435 bool haveEaten = false; 3436 bool isAdd = true; 3437 int Reg = -1; 3438 if (Tok.is(AsmToken::Plus)) { 3439 Parser.Lex(); // Eat the '+' token. 3440 haveEaten = true; 3441 } else if (Tok.is(AsmToken::Minus)) { 3442 Parser.Lex(); // Eat the '-' token. 3443 isAdd = false; 3444 haveEaten = true; 3445 } 3446 if (Parser.getTok().is(AsmToken::Identifier)) 3447 Reg = tryParseRegister(); 3448 if (Reg == -1) { 3449 if (!haveEaten) 3450 return MatchOperand_NoMatch; 3451 Error(Parser.getTok().getLoc(), "register expected"); 3452 return MatchOperand_ParseFail; 3453 } 3454 SMLoc E = Parser.getTok().getLoc(); 3455 3456 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3457 0, S, E)); 3458 3459 return MatchOperand_Success; 3460} 3461 3462/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3463/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3464/// when they refer multiple MIOperands inside a single one. 3465bool ARMAsmParser:: 3466cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3467 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3468 // Rt, Rt2 3469 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3470 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3471 // Create a writeback register dummy placeholder. 
3472 Inst.addOperand(MCOperand::CreateReg(0)); 3473 // addr 3474 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3475 // pred 3476 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3477 return true; 3478} 3479 3480/// cvtT2StrdPre - Convert parsed operands to MCInst. 3481/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3482/// when they refer multiple MIOperands inside a single one. 3483bool ARMAsmParser:: 3484cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3485 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3486 // Create a writeback register dummy placeholder. 3487 Inst.addOperand(MCOperand::CreateReg(0)); 3488 // Rt, Rt2 3489 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3490 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3491 // addr 3492 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3493 // pred 3494 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3495 return true; 3496} 3497 3498/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3499/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3500/// when they refer multiple MIOperands inside a single one. 3501bool ARMAsmParser:: 3502cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3503 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3504 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3505 3506 // Create a writeback register dummy placeholder. 3507 Inst.addOperand(MCOperand::CreateImm(0)); 3508 3509 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3510 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3511 return true; 3512} 3513 3514/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3515/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3516/// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder (filled in by the
  // tied-operand processing).
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr (addrmode2 expands to three MCOperands)
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (addrmode3 expands to three MCOperands)
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder (replaced by the
  // tied-operand processing).
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder (replaced by the
  // tied-operand processing).
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (addrmode3 expands to three MCOperands)
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand. For the three-operand form, it suffices that ONE of the two
  // sources matches the destination.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  // Rd
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Optional cc_out (set-flags marker in/outside an IT block).
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
      ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rm is tied to Rd: duplicate the destination register operand.
  Inst.addOperand(Inst.getOperand(0));
  // pred
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

/// cvtVLDwbFixed - Convert a VLD with fixed-stride writeback to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVLDwbRegister - Convert a VLD with register-stride writeback to MCInst.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbFixed - Convert a VST with fixed-stride writeback to MCInst.
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbRegister - Convert a VST with register-stride writeback to MCInst.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression, return false if successful else return true
/// or an error.
The first token must be a '[' when called. 3824bool ARMAsmParser:: 3825parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3826 SMLoc S, E; 3827 assert(Parser.getTok().is(AsmToken::LBrac) && 3828 "Token is not a Left Bracket"); 3829 S = Parser.getTok().getLoc(); 3830 Parser.Lex(); // Eat left bracket token. 3831 3832 const AsmToken &BaseRegTok = Parser.getTok(); 3833 int BaseRegNum = tryParseRegister(); 3834 if (BaseRegNum == -1) 3835 return Error(BaseRegTok.getLoc(), "register expected"); 3836 3837 // The next token must either be a comma or a closing bracket. 3838 const AsmToken &Tok = Parser.getTok(); 3839 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3840 return Error(Tok.getLoc(), "malformed memory operand"); 3841 3842 if (Tok.is(AsmToken::RBrac)) { 3843 E = Tok.getLoc(); 3844 Parser.Lex(); // Eat right bracket token. 3845 3846 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3847 0, 0, false, S, E)); 3848 3849 // If there's a pre-indexing writeback marker, '!', just add it as a token 3850 // operand. It's rather odd, but syntactically valid. 3851 if (Parser.getTok().is(AsmToken::Exclaim)) { 3852 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3853 Parser.Lex(); // Eat the '!'. 3854 } 3855 3856 return false; 3857 } 3858 3859 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3860 Parser.Lex(); // Eat the comma. 3861 3862 // If we have a ':', it's an alignment specifier. 3863 if (Parser.getTok().is(AsmToken::Colon)) { 3864 Parser.Lex(); // Eat the ':'. 3865 E = Parser.getTok().getLoc(); 3866 3867 const MCExpr *Expr; 3868 if (getParser().ParseExpression(Expr)) 3869 return true; 3870 3871 // The expression has to be a constant. Memory references with relocations 3872 // don't come through here, as they use the <label> forms of the relevant 3873 // instructions. 
3874 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3875 if (!CE) 3876 return Error (E, "constant expression expected"); 3877 3878 unsigned Align = 0; 3879 switch (CE->getValue()) { 3880 default: 3881 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3882 case 64: Align = 8; break; 3883 case 128: Align = 16; break; 3884 case 256: Align = 32; break; 3885 } 3886 3887 // Now we should have the closing ']' 3888 E = Parser.getTok().getLoc(); 3889 if (Parser.getTok().isNot(AsmToken::RBrac)) 3890 return Error(E, "']' expected"); 3891 Parser.Lex(); // Eat right bracket token. 3892 3893 // Don't worry about range checking the value here. That's handled by 3894 // the is*() predicates. 3895 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3896 ARM_AM::no_shift, 0, Align, 3897 false, S, E)); 3898 3899 // If there's a pre-indexing writeback marker, '!', just add it as a token 3900 // operand. 3901 if (Parser.getTok().is(AsmToken::Exclaim)) { 3902 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3903 Parser.Lex(); // Eat the '!'. 3904 } 3905 3906 return false; 3907 } 3908 3909 // If we have a '#', it's an immediate offset, else assume it's a register 3910 // offset. Be friendly and also accept a plain integer (without a leading 3911 // hash) for gas compatibility. 3912 if (Parser.getTok().is(AsmToken::Hash) || 3913 Parser.getTok().is(AsmToken::Dollar) || 3914 Parser.getTok().is(AsmToken::Integer)) { 3915 if (Parser.getTok().isNot(AsmToken::Integer)) 3916 Parser.Lex(); // Eat the '#'. 3917 E = Parser.getTok().getLoc(); 3918 3919 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3920 const MCExpr *Offset; 3921 if (getParser().ParseExpression(Offset)) 3922 return true; 3923 3924 // The expression has to be a constant. Memory references with relocations 3925 // don't come through here, as they use the <label> forms of the relevant 3926 // instructions. 
3927 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3928 if (!CE) 3929 return Error (E, "constant expression expected"); 3930 3931 // If the constant was #-0, represent it as INT32_MIN. 3932 int32_t Val = CE->getValue(); 3933 if (isNegative && Val == 0) 3934 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3935 3936 // Now we should have the closing ']' 3937 E = Parser.getTok().getLoc(); 3938 if (Parser.getTok().isNot(AsmToken::RBrac)) 3939 return Error(E, "']' expected"); 3940 Parser.Lex(); // Eat right bracket token. 3941 3942 // Don't worry about range checking the value here. That's handled by 3943 // the is*() predicates. 3944 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3945 ARM_AM::no_shift, 0, 0, 3946 false, S, E)); 3947 3948 // If there's a pre-indexing writeback marker, '!', just add it as a token 3949 // operand. 3950 if (Parser.getTok().is(AsmToken::Exclaim)) { 3951 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3952 Parser.Lex(); // Eat the '!'. 3953 } 3954 3955 return false; 3956 } 3957 3958 // The register offset is optionally preceded by a '+' or '-' 3959 bool isNegative = false; 3960 if (Parser.getTok().is(AsmToken::Minus)) { 3961 isNegative = true; 3962 Parser.Lex(); // Eat the '-'. 3963 } else if (Parser.getTok().is(AsmToken::Plus)) { 3964 // Nothing to do. 3965 Parser.Lex(); // Eat the '+'. 3966 } 3967 3968 E = Parser.getTok().getLoc(); 3969 int OffsetRegNum = tryParseRegister(); 3970 if (OffsetRegNum == -1) 3971 return Error(E, "register expected"); 3972 3973 // If there's a shift operator, handle it. 3974 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3975 unsigned ShiftImm = 0; 3976 if (Parser.getTok().is(AsmToken::Comma)) { 3977 Parser.Lex(); // Eat the ','. 
3978 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3979 return true; 3980 } 3981 3982 // Now we should have the closing ']' 3983 E = Parser.getTok().getLoc(); 3984 if (Parser.getTok().isNot(AsmToken::RBrac)) 3985 return Error(E, "']' expected"); 3986 Parser.Lex(); // Eat right bracket token. 3987 3988 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3989 ShiftType, ShiftImm, 0, isNegative, 3990 S, E)); 3991 3992 // If there's a pre-indexing writeback marker, '!', just add it as a token 3993 // operand. 3994 if (Parser.getTok().is(AsmToken::Exclaim)) { 3995 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3996 Parser.Lex(); // Eat the '!'. 3997 } 3998 3999 return false; 4000} 4001 4002/// parseMemRegOffsetShift - one of these two: 4003/// ( lsl | lsr | asr | ror ) , # shift_amount 4004/// rrx 4005/// return true if it parses a shift otherwise it returns false. 4006bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 4007 unsigned &Amount) { 4008 SMLoc Loc = Parser.getTok().getLoc(); 4009 const AsmToken &Tok = Parser.getTok(); 4010 if (Tok.isNot(AsmToken::Identifier)) 4011 return true; 4012 StringRef ShiftName = Tok.getString(); 4013 if (ShiftName == "lsl" || ShiftName == "LSL" || 4014 ShiftName == "asl" || ShiftName == "ASL") 4015 St = ARM_AM::lsl; 4016 else if (ShiftName == "lsr" || ShiftName == "LSR") 4017 St = ARM_AM::lsr; 4018 else if (ShiftName == "asr" || ShiftName == "ASR") 4019 St = ARM_AM::asr; 4020 else if (ShiftName == "ror" || ShiftName == "ROR") 4021 St = ARM_AM::ror; 4022 else if (ShiftName == "rrx" || ShiftName == "RRX") 4023 St = ARM_AM::rrx; 4024 else 4025 return Error(Loc, "illegal shift operator"); 4026 Parser.Lex(); // Eat shift type token. 4027 4028 // rrx stands alone. 4029 Amount = 0; 4030 if (St != ARM_AM::rrx) { 4031 Loc = Parser.getTok().getLoc(); 4032 // A '#' and a shift amount. 
4033 const AsmToken &HashTok = Parser.getTok(); 4034 if (HashTok.isNot(AsmToken::Hash) && 4035 HashTok.isNot(AsmToken::Dollar)) 4036 return Error(HashTok.getLoc(), "'#' expected"); 4037 Parser.Lex(); // Eat hash token. 4038 4039 const MCExpr *Expr; 4040 if (getParser().ParseExpression(Expr)) 4041 return true; 4042 // Range check the immediate. 4043 // lsl, ror: 0 <= imm <= 31 4044 // lsr, asr: 0 <= imm <= 32 4045 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 4046 if (!CE) 4047 return Error(Loc, "shift amount must be an immediate"); 4048 int64_t Imm = CE->getValue(); 4049 if (Imm < 0 || 4050 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 4051 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 4052 return Error(Loc, "immediate shift value out of range"); 4053 Amount = Imm; 4054 } 4055 4056 return false; 4057} 4058 4059/// parseFPImm - A floating point immediate expression operand. 4060ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 4061parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4062 SMLoc S = Parser.getTok().getLoc(); 4063 4064 if (Parser.getTok().isNot(AsmToken::Hash) && 4065 Parser.getTok().isNot(AsmToken::Dollar)) 4066 return MatchOperand_NoMatch; 4067 4068 // Disambiguate the VMOV forms that can accept an FP immediate. 4069 // vmov.f32 <sreg>, #imm 4070 // vmov.f64 <dreg>, #imm 4071 // vmov.f32 <dreg>, #imm @ vector f32x2 4072 // vmov.f32 <qreg>, #imm @ vector f32x4 4073 // 4074 // There are also the NEON VMOV instructions which expect an 4075 // integer constant. Make sure we don't try to parse an FPImm 4076 // for these: 4077 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 4078 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 4079 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 4080 TyOp->getToken() != ".f64")) 4081 return MatchOperand_NoMatch; 4082 4083 Parser.Lex(); // Eat the '#'. 4084 4085 // Handle negation, as that still comes through as a separate token. 
4086 bool isNegative = false; 4087 if (Parser.getTok().is(AsmToken::Minus)) { 4088 isNegative = true; 4089 Parser.Lex(); 4090 } 4091 const AsmToken &Tok = Parser.getTok(); 4092 if (Tok.is(AsmToken::Real)) { 4093 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 4094 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 4095 // If we had a '-' in front, toggle the sign bit. 4096 IntVal ^= (uint64_t)isNegative << 63; 4097 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 4098 Parser.Lex(); // Eat the token. 4099 if (Val == -1) { 4100 TokError("floating point value out of range"); 4101 return MatchOperand_ParseFail; 4102 } 4103 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 4104 return MatchOperand_Success; 4105 } 4106 if (Tok.is(AsmToken::Integer)) { 4107 int64_t Val = Tok.getIntVal(); 4108 Parser.Lex(); // Eat the token. 4109 if (Val > 255 || Val < 0) { 4110 TokError("encoded floating point value out of range"); 4111 return MatchOperand_ParseFail; 4112 } 4113 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 4114 return MatchOperand_Success; 4115 } 4116 4117 TokError("invalid floating point immediate"); 4118 return MatchOperand_ParseFail; 4119} 4120/// Parse a arm instruction operand. For now this parses the operand regardless 4121/// of the mnemonic. 4122bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4123 StringRef Mnemonic) { 4124 SMLoc S, E; 4125 4126 // Check if the current operand has a custom associated parser, if so, try to 4127 // custom parse the operand, or fallback to the general approach. 4128 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 4129 if (ResTy == MatchOperand_Success) 4130 return false; 4131 // If there wasn't a custom match, try the generic matcher below. Otherwise, 4132 // there was a match, but an error occurred, in which case, just return that 4133 // the operand parsing failed. 
4134 if (ResTy == MatchOperand_ParseFail) 4135 return true; 4136 4137 switch (getLexer().getKind()) { 4138 default: 4139 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 4140 return true; 4141 case AsmToken::Identifier: { 4142 // If this is VMRS, check for the apsr_nzcv operand. 4143 if (!tryParseRegisterWithWriteBack(Operands)) 4144 return false; 4145 int Res = tryParseShiftRegister(Operands); 4146 if (Res == 0) // success 4147 return false; 4148 else if (Res == -1) // irrecoverable error 4149 return true; 4150 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 4151 S = Parser.getTok().getLoc(); 4152 Parser.Lex(); 4153 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 4154 return false; 4155 } 4156 4157 // Fall though for the Identifier case that is not a register or a 4158 // special name. 4159 } 4160 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) 4161 case AsmToken::Integer: // things like 1f and 2b as a branch targets 4162 case AsmToken::String: // quoted label names. 4163 case AsmToken::Dot: { // . as a branch target 4164 // This was not a register so parse other operands that start with an 4165 // identifier (like labels) as expressions and create them as immediates. 4166 const MCExpr *IdVal; 4167 S = Parser.getTok().getLoc(); 4168 if (getParser().ParseExpression(IdVal)) 4169 return true; 4170 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4171 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 4172 return false; 4173 } 4174 case AsmToken::LBrac: 4175 return parseMemory(Operands); 4176 case AsmToken::LCurly: 4177 return parseRegisterList(Operands); 4178 case AsmToken::Dollar: 4179 case AsmToken::Hash: { 4180 // #42 -> immediate. 
4181 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 4182 S = Parser.getTok().getLoc(); 4183 Parser.Lex(); 4184 bool isNegative = Parser.getTok().is(AsmToken::Minus); 4185 const MCExpr *ImmVal; 4186 if (getParser().ParseExpression(ImmVal)) 4187 return true; 4188 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 4189 if (CE) { 4190 int32_t Val = CE->getValue(); 4191 if (isNegative && Val == 0) 4192 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 4193 } 4194 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4195 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 4196 return false; 4197 } 4198 case AsmToken::Colon: { 4199 // ":lower16:" and ":upper16:" expression prefixes 4200 // FIXME: Check it's an expression prefix, 4201 // e.g. (FOO - :lower16:BAR) isn't legal. 4202 ARMMCExpr::VariantKind RefKind; 4203 if (parsePrefix(RefKind)) 4204 return true; 4205 4206 const MCExpr *SubExprVal; 4207 if (getParser().ParseExpression(SubExprVal)) 4208 return true; 4209 4210 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 4211 getContext()); 4212 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4213 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 4214 return false; 4215 } 4216 } 4217} 4218 4219// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 4220// :lower16: and :upper16:. 
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  // The prefix must be terminated by a second ':'.
  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  // These happen to end in letter pairs that look like condition codes
  // (e.g. "teq" ends in "eq") but are complete mnemonics themselves.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
4353void ARMAsmParser:: 4354getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4355 bool &CanAcceptPredicationCode) { 4356 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4357 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4358 Mnemonic == "add" || Mnemonic == "adc" || 4359 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4360 Mnemonic == "orr" || Mnemonic == "mvn" || 4361 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4362 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4363 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4364 Mnemonic == "mla" || Mnemonic == "smlal" || 4365 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4366 CanAcceptCarrySet = true; 4367 } else 4368 CanAcceptCarrySet = false; 4369 4370 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4371 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4372 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4373 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4374 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4375 (Mnemonic == "clrex" && !isThumb()) || 4376 (Mnemonic == "nop" && isThumbOne()) || 4377 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4378 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4379 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4380 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4381 !isThumb()) || 4382 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4383 CanAcceptPredicationCode = false; 4384 } else 4385 CanAcceptPredicationCode = true; 4386 4387 if (isThumb()) { 4388 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4389 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4390 CanAcceptPredicationCode = false; 4391 } 4392} 4393 4394bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4395 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4396 // FIXME: This is all horribly hacky. We really need a better way to deal 4397 // with optional operands like this in the matcher table. 4398 4399 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4400 // another does not. Specifically, the MOVW instruction does not. So we 4401 // special case it here and remove the defaulted (non-setting) cc_out 4402 // operand if that's the instruction we're trying to match. 4403 // 4404 // We do this as post-processing of the explicit operands rather than just 4405 // conditionally adding the cc_out in the first place because we need 4406 // to check the type of the parsed immediate operand. 4407 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4408 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4409 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4410 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4411 return true; 4412 4413 // Register-register 'add' for thumb does not have a cc_out operand 4414 // when there are only two register operands. 4415 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4416 static_cast<ARMOperand*>(Operands[3])->isReg() && 4417 static_cast<ARMOperand*>(Operands[4])->isReg() && 4418 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4419 return true; 4420 // Register-register 'add' for thumb does not have a cc_out operand 4421 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4422 // have to check the immediate range here since Thumb2 has a variant 4423 // that can handle a different range and has a cc_out operand. 
4424 if (((isThumb() && Mnemonic == "add") || 4425 (isThumbTwo() && Mnemonic == "sub")) && 4426 Operands.size() == 6 && 4427 static_cast<ARMOperand*>(Operands[3])->isReg() && 4428 static_cast<ARMOperand*>(Operands[4])->isReg() && 4429 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4430 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4431 (static_cast<ARMOperand*>(Operands[5])->isReg() || 4432 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4433 return true; 4434 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4435 // imm0_4095 variant. That's the least-preferred variant when 4436 // selecting via the generic "add" mnemonic, so to know that we 4437 // should remove the cc_out operand, we have to explicitly check that 4438 // it's not one of the other variants. Ugh. 4439 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4440 Operands.size() == 6 && 4441 static_cast<ARMOperand*>(Operands[3])->isReg() && 4442 static_cast<ARMOperand*>(Operands[4])->isReg() && 4443 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4444 // Nest conditions rather than one big 'if' statement for readability. 4445 // 4446 // If either register is a high reg, it's either one of the SP 4447 // variants (handled above) or a 32-bit encoding, so we just 4448 // check against T3. 4449 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4450 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4451 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4452 return false; 4453 // If both registers are low, we're in an IT block, and the immediate is 4454 // in range, we should use encoding T1 instead, which has a cc_out. 
4455 if (inITBlock() && 4456 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4457 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4458 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4459 return false; 4460 4461 // Otherwise, we use encoding T4, which does not have a cc_out 4462 // operand. 4463 return true; 4464 } 4465 4466 // The thumb2 multiply instruction doesn't have a CCOut register, so 4467 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4468 // use the 16-bit encoding or not. 4469 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4470 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4471 static_cast<ARMOperand*>(Operands[3])->isReg() && 4472 static_cast<ARMOperand*>(Operands[4])->isReg() && 4473 static_cast<ARMOperand*>(Operands[5])->isReg() && 4474 // If the registers aren't low regs, the destination reg isn't the 4475 // same as one of the source regs, or the cc_out operand is zero 4476 // outside of an IT block, we have to use the 32-bit encoding, so 4477 // remove the cc_out operand. 4478 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4479 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4480 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) || 4481 !inITBlock() || 4482 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4483 static_cast<ARMOperand*>(Operands[5])->getReg() && 4484 static_cast<ARMOperand*>(Operands[3])->getReg() != 4485 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4486 return true; 4487 4488 // Also check the 'mul' syntax variant that doesn't specify an explicit 4489 // destination register. 
4490 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && 4491 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4492 static_cast<ARMOperand*>(Operands[3])->isReg() && 4493 static_cast<ARMOperand*>(Operands[4])->isReg() && 4494 // If the registers aren't low regs or the cc_out operand is zero 4495 // outside of an IT block, we have to use the 32-bit encoding, so 4496 // remove the cc_out operand. 4497 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4498 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4499 !inITBlock())) 4500 return true; 4501 4502 4503 4504 // Register-register 'add/sub' for thumb does not have a cc_out operand 4505 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4506 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4507 // right, this will result in better diagnostics (which operand is off) 4508 // anyway. 4509 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4510 (Operands.size() == 5 || Operands.size() == 6) && 4511 static_cast<ARMOperand*>(Operands[3])->isReg() && 4512 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4513 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4514 return true; 4515 4516 return false; 4517} 4518 4519static bool isDataTypeToken(StringRef Tok) { 4520 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || 4521 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || 4522 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || 4523 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || 4524 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || 4525 Tok == ".f" || Tok == ".d"; 4526} 4527 4528// FIXME: This bit should probably be handled via an explicit match class 4529// in the .td files that matches the suffix instead of having it be 4530// a literal string token the way it is now. 
// Return true when the given mnemonic ignores a trailing NEON data type
// suffix entirely (currently only the vldm/vstm families). The DT
// argument is unused here but kept for interface symmetry.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

// Forward declaration; defined by the tblgen'erated alias-matching code.
static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffices and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  unsigned AvailableFeatures = getAvailableFeatures();
  applyMnemonicAliases(Name, AvailableFeatures);

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // Loc points just past the "it" prefix, at the mask characters.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask from the last character backward; the sentinel '1'
    // bit (started at 8) ends up marking the end of the condition list.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition code suffix (if any) follows the mnemonic and the
    // optional 's' suffix, hence the "+ CarrySetting" offset.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // The ".n" (narrow) suffix is dropped entirely; all other extra
    // tokens become explicit token operands for the matcher.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here. Take care not to convert those
  // that should hit the Thumb2 encoding.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0 &&
        (isThumbOne() ||
         // The cc_out operand matches the IT block.
         ((inITBlock() != CarrySetting) &&
         // Neither register operand is a high register.
         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for an opcode directly in the ARMInsts table.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// FIXME: We would really like to be able to tablegen'erate this.
// Validate context-sensitive constraints (IT-block state, register
// pairing, register-list legality) that the table-driven matcher cannot
// express. Returns true (after emitting a diagnostic) on failure.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    // bit==1 means 'then' (use the IT condition); bit==0 means 'else'
    // (use its opposite).
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. (Operand 0 is the writeback register here,
    // so the source pair starts at operand 1.)
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

// Map a pseudo "Asm" VST1 lane opcode (one per data-type suffix spelling)
// onto the single real instruction opcode for its element size.
// NOTE(review): in NDEBUG builds the default case falls off the end with
// no return value — consider llvm_unreachable here.
static unsigned getRealVSTLNOpcode(unsigned Opc) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  case ARM::VST1LNdWB_fixed_Asm_8:   return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_P8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_I8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_S8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_U8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_F:   return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:   return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_P8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_I8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_S8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_U8:  return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_F:   return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:   return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_P8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_I8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_S8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_U8:  return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_F:   return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32;
  case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32;
  }
}

// Map a pseudo "Asm" VLD1 lane opcode onto the real instruction opcode
// for its element size; mirror image of getRealVSTLNOpcode above.
// NOTE(review): same NDEBUG fall-off-the-end caveat as above.
static unsigned
getRealVLDLNOpcode(unsigned Opc) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  case ARM::VLD1LNdWB_fixed_Asm_8:   return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_P8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_I8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_S8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_U8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16:  return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32:  return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_F:   return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8:   return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_P8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_I8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_S8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_U8:  return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16:  return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32:  return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_F:   return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8:   return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_P8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_I8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_S8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_U8:  return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16:  return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32:  return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_F:   return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32;
  }
}

bool ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  // Handle NEON VST1 complex aliases.
5065 case ARM::VST1LNdWB_register_Asm_8: 5066 case ARM::VST1LNdWB_register_Asm_P8: 5067 case ARM::VST1LNdWB_register_Asm_I8: 5068 case ARM::VST1LNdWB_register_Asm_S8: 5069 case ARM::VST1LNdWB_register_Asm_U8: 5070 case ARM::VST1LNdWB_register_Asm_16: 5071 case ARM::VST1LNdWB_register_Asm_P16: 5072 case ARM::VST1LNdWB_register_Asm_I16: 5073 case ARM::VST1LNdWB_register_Asm_S16: 5074 case ARM::VST1LNdWB_register_Asm_U16: 5075 case ARM::VST1LNdWB_register_Asm_32: 5076 case ARM::VST1LNdWB_register_Asm_F: 5077 case ARM::VST1LNdWB_register_Asm_F32: 5078 case ARM::VST1LNdWB_register_Asm_I32: 5079 case ARM::VST1LNdWB_register_Asm_S32: 5080 case ARM::VST1LNdWB_register_Asm_U32: { 5081 MCInst TmpInst; 5082 // Shuffle the operands around so the lane index operand is in the 5083 // right place. 5084 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 5085 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5086 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5087 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5088 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5089 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5090 TmpInst.addOperand(Inst.getOperand(1)); // lane 5091 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5092 TmpInst.addOperand(Inst.getOperand(6)); 5093 Inst = TmpInst; 5094 return true; 5095 } 5096 case ARM::VST1LNdWB_fixed_Asm_8: 5097 case ARM::VST1LNdWB_fixed_Asm_P8: 5098 case ARM::VST1LNdWB_fixed_Asm_I8: 5099 case ARM::VST1LNdWB_fixed_Asm_S8: 5100 case ARM::VST1LNdWB_fixed_Asm_U8: 5101 case ARM::VST1LNdWB_fixed_Asm_16: 5102 case ARM::VST1LNdWB_fixed_Asm_P16: 5103 case ARM::VST1LNdWB_fixed_Asm_I16: 5104 case ARM::VST1LNdWB_fixed_Asm_S16: 5105 case ARM::VST1LNdWB_fixed_Asm_U16: 5106 case ARM::VST1LNdWB_fixed_Asm_32: 5107 case ARM::VST1LNdWB_fixed_Asm_F: 5108 case ARM::VST1LNdWB_fixed_Asm_F32: 5109 case ARM::VST1LNdWB_fixed_Asm_I32: 5110 case ARM::VST1LNdWB_fixed_Asm_S32: 5111 case ARM::VST1LNdWB_fixed_Asm_U32: { 5112 MCInst TmpInst; 5113 // Shuffle the 
operands around so the lane index operand is in the 5114 // right place. 5115 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 5116 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5117 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5118 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5119 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5120 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5121 TmpInst.addOperand(Inst.getOperand(1)); // lane 5122 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5123 TmpInst.addOperand(Inst.getOperand(5)); 5124 Inst = TmpInst; 5125 return true; 5126 } 5127 case ARM::VST1LNdAsm_8: 5128 case ARM::VST1LNdAsm_P8: 5129 case ARM::VST1LNdAsm_I8: 5130 case ARM::VST1LNdAsm_S8: 5131 case ARM::VST1LNdAsm_U8: 5132 case ARM::VST1LNdAsm_16: 5133 case ARM::VST1LNdAsm_P16: 5134 case ARM::VST1LNdAsm_I16: 5135 case ARM::VST1LNdAsm_S16: 5136 case ARM::VST1LNdAsm_U16: 5137 case ARM::VST1LNdAsm_32: 5138 case ARM::VST1LNdAsm_F: 5139 case ARM::VST1LNdAsm_F32: 5140 case ARM::VST1LNdAsm_I32: 5141 case ARM::VST1LNdAsm_S32: 5142 case ARM::VST1LNdAsm_U32: { 5143 MCInst TmpInst; 5144 // Shuffle the operands around so the lane index operand is in the 5145 // right place. 5146 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 5147 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5148 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5149 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5150 TmpInst.addOperand(Inst.getOperand(1)); // lane 5151 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5152 TmpInst.addOperand(Inst.getOperand(5)); 5153 Inst = TmpInst; 5154 return true; 5155 } 5156 // Handle NEON VLD1 complex aliases. 
5157 case ARM::VLD1LNdWB_register_Asm_8: 5158 case ARM::VLD1LNdWB_register_Asm_P8: 5159 case ARM::VLD1LNdWB_register_Asm_I8: 5160 case ARM::VLD1LNdWB_register_Asm_S8: 5161 case ARM::VLD1LNdWB_register_Asm_U8: 5162 case ARM::VLD1LNdWB_register_Asm_16: 5163 case ARM::VLD1LNdWB_register_Asm_P16: 5164 case ARM::VLD1LNdWB_register_Asm_I16: 5165 case ARM::VLD1LNdWB_register_Asm_S16: 5166 case ARM::VLD1LNdWB_register_Asm_U16: 5167 case ARM::VLD1LNdWB_register_Asm_32: 5168 case ARM::VLD1LNdWB_register_Asm_F: 5169 case ARM::VLD1LNdWB_register_Asm_F32: 5170 case ARM::VLD1LNdWB_register_Asm_I32: 5171 case ARM::VLD1LNdWB_register_Asm_S32: 5172 case ARM::VLD1LNdWB_register_Asm_U32: { 5173 MCInst TmpInst; 5174 // Shuffle the operands around so the lane index operand is in the 5175 // right place. 5176 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5177 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5178 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5179 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5180 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5181 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5182 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5183 TmpInst.addOperand(Inst.getOperand(1)); // lane 5184 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5185 TmpInst.addOperand(Inst.getOperand(6)); 5186 Inst = TmpInst; 5187 return true; 5188 } 5189 case ARM::VLD1LNdWB_fixed_Asm_8: 5190 case ARM::VLD1LNdWB_fixed_Asm_P8: 5191 case ARM::VLD1LNdWB_fixed_Asm_I8: 5192 case ARM::VLD1LNdWB_fixed_Asm_S8: 5193 case ARM::VLD1LNdWB_fixed_Asm_U8: 5194 case ARM::VLD1LNdWB_fixed_Asm_16: 5195 case ARM::VLD1LNdWB_fixed_Asm_P16: 5196 case ARM::VLD1LNdWB_fixed_Asm_I16: 5197 case ARM::VLD1LNdWB_fixed_Asm_S16: 5198 case ARM::VLD1LNdWB_fixed_Asm_U16: 5199 case ARM::VLD1LNdWB_fixed_Asm_32: 5200 case ARM::VLD1LNdWB_fixed_Asm_F: 5201 case ARM::VLD1LNdWB_fixed_Asm_F32: 5202 case ARM::VLD1LNdWB_fixed_Asm_I32: 5203 case ARM::VLD1LNdWB_fixed_Asm_S32: 5204 case 
ARM::VLD1LNdWB_fixed_Asm_U32: { 5205 MCInst TmpInst; 5206 // Shuffle the operands around so the lane index operand is in the 5207 // right place. 5208 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5209 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5210 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5211 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5212 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5213 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5214 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5215 TmpInst.addOperand(Inst.getOperand(1)); // lane 5216 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5217 TmpInst.addOperand(Inst.getOperand(5)); 5218 Inst = TmpInst; 5219 return true; 5220 } 5221 case ARM::VLD1LNdAsm_8: 5222 case ARM::VLD1LNdAsm_P8: 5223 case ARM::VLD1LNdAsm_I8: 5224 case ARM::VLD1LNdAsm_S8: 5225 case ARM::VLD1LNdAsm_U8: 5226 case ARM::VLD1LNdAsm_16: 5227 case ARM::VLD1LNdAsm_P16: 5228 case ARM::VLD1LNdAsm_I16: 5229 case ARM::VLD1LNdAsm_S16: 5230 case ARM::VLD1LNdAsm_U16: 5231 case ARM::VLD1LNdAsm_32: 5232 case ARM::VLD1LNdAsm_F: 5233 case ARM::VLD1LNdAsm_F32: 5234 case ARM::VLD1LNdAsm_I32: 5235 case ARM::VLD1LNdAsm_S32: 5236 case ARM::VLD1LNdAsm_U32: { 5237 MCInst TmpInst; 5238 // Shuffle the operands around so the lane index operand is in the 5239 // right place. 5240 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5241 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5242 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5243 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5244 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5245 TmpInst.addOperand(Inst.getOperand(1)); // lane 5246 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5247 TmpInst.addOperand(Inst.getOperand(5)); 5248 Inst = TmpInst; 5249 return true; 5250 } 5251 // Handle the MOV complex aliases. 
5252 case ARM::ASRr: 5253 case ARM::LSRr: 5254 case ARM::LSLr: 5255 case ARM::RORr: { 5256 ARM_AM::ShiftOpc ShiftTy; 5257 switch(Inst.getOpcode()) { 5258 default: llvm_unreachable("unexpected opcode!"); 5259 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 5260 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 5261 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 5262 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 5263 } 5264 // A shift by zero is a plain MOVr, not a MOVsi. 5265 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 5266 MCInst TmpInst; 5267 TmpInst.setOpcode(ARM::MOVsr); 5268 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5269 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5270 TmpInst.addOperand(Inst.getOperand(2)); // Rm 5271 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5272 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 5273 TmpInst.addOperand(Inst.getOperand(4)); 5274 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 5275 Inst = TmpInst; 5276 return true; 5277 } 5278 case ARM::ASRi: 5279 case ARM::LSRi: 5280 case ARM::LSLi: 5281 case ARM::RORi: { 5282 ARM_AM::ShiftOpc ShiftTy; 5283 switch(Inst.getOpcode()) { 5284 default: llvm_unreachable("unexpected opcode!"); 5285 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 5286 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 5287 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 5288 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 5289 } 5290 // A shift by zero is a plain MOVr, not a MOVsi. 5291 unsigned Amt = Inst.getOperand(2).getImm(); 5292 unsigned Opc = Amt == 0 ? 
ARM::MOVr : ARM::MOVsi; 5293 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 5294 MCInst TmpInst; 5295 TmpInst.setOpcode(Opc); 5296 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5297 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5298 if (Opc == ARM::MOVsi) 5299 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5300 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 5301 TmpInst.addOperand(Inst.getOperand(4)); 5302 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 5303 Inst = TmpInst; 5304 return true; 5305 } 5306 case ARM::RRXi: { 5307 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 5308 MCInst TmpInst; 5309 TmpInst.setOpcode(ARM::MOVsi); 5310 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5311 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5312 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5313 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5314 TmpInst.addOperand(Inst.getOperand(3)); 5315 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 5316 Inst = TmpInst; 5317 return true; 5318 } 5319 case ARM::t2LDMIA_UPD: { 5320 // If this is a load of a single register, then we should use 5321 // a post-indexed LDR instruction instead, per the ARM ARM. 5322 if (Inst.getNumOperands() != 5) 5323 return false; 5324 MCInst TmpInst; 5325 TmpInst.setOpcode(ARM::t2LDR_POST); 5326 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5327 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5328 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5329 TmpInst.addOperand(MCOperand::CreateImm(4)); 5330 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5331 TmpInst.addOperand(Inst.getOperand(3)); 5332 Inst = TmpInst; 5333 return true; 5334 } 5335 case ARM::t2STMDB_UPD: { 5336 // If this is a store of a single register, then we should use 5337 // a pre-indexed STR instruction instead, per the ARM ARM. 
5338 if (Inst.getNumOperands() != 5) 5339 return false; 5340 MCInst TmpInst; 5341 TmpInst.setOpcode(ARM::t2STR_PRE); 5342 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5343 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5344 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5345 TmpInst.addOperand(MCOperand::CreateImm(-4)); 5346 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5347 TmpInst.addOperand(Inst.getOperand(3)); 5348 Inst = TmpInst; 5349 return true; 5350 } 5351 case ARM::LDMIA_UPD: 5352 // If this is a load of a single register via a 'pop', then we should use 5353 // a post-indexed LDR instruction instead, per the ARM ARM. 5354 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 5355 Inst.getNumOperands() == 5) { 5356 MCInst TmpInst; 5357 TmpInst.setOpcode(ARM::LDR_POST_IMM); 5358 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5359 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5360 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5361 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 5362 TmpInst.addOperand(MCOperand::CreateImm(4)); 5363 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5364 TmpInst.addOperand(Inst.getOperand(3)); 5365 Inst = TmpInst; 5366 return true; 5367 } 5368 break; 5369 case ARM::STMDB_UPD: 5370 // If this is a store of a single register via a 'push', then we should use 5371 // a pre-indexed STR instruction instead, per the ARM ARM. 
5372 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 5373 Inst.getNumOperands() == 5) { 5374 MCInst TmpInst; 5375 TmpInst.setOpcode(ARM::STR_PRE_IMM); 5376 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5377 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5378 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 5379 TmpInst.addOperand(MCOperand::CreateImm(-4)); 5380 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5381 TmpInst.addOperand(Inst.getOperand(3)); 5382 Inst = TmpInst; 5383 } 5384 break; 5385 case ARM::t2ADDri12: 5386 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" 5387 // mnemonic was used (not "addw"), encoding T3 is preferred. 5388 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" || 5389 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 5390 break; 5391 Inst.setOpcode(ARM::t2ADDri); 5392 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 5393 break; 5394 case ARM::t2SUBri12: 5395 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" 5396 // mnemonic was used (not "subw"), encoding T3 is preferred. 5397 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" || 5398 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 5399 break; 5400 Inst.setOpcode(ARM::t2SUBri); 5401 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 5402 break; 5403 case ARM::tADDi8: 5404 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 5405 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 5406 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 5407 // to encoding T1 if <Rd> is omitted." 5408 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 5409 Inst.setOpcode(ARM::tADDi3); 5410 return true; 5411 } 5412 break; 5413 case ARM::tSUBi8: 5414 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 5415 // explicitly specified. 
From the ARM ARM: "Encoding T1 is preferred 5416 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 5417 // to encoding T1 if <Rd> is omitted." 5418 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 5419 Inst.setOpcode(ARM::tSUBi3); 5420 return true; 5421 } 5422 break; 5423 case ARM::t2ADDrr: { 5424 // If the destination and first source operand are the same, and 5425 // there's no setting of the flags, use encoding T2 instead of T3. 5426 // Note that this is only for ADD, not SUB. This mirrors the system 5427 // 'as' behaviour. Make sure the wide encoding wasn't explicit. 5428 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 5429 Inst.getOperand(5).getReg() != 0 || 5430 (static_cast<ARMOperand*>(Operands[3])->isToken() && 5431 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) 5432 break; 5433 MCInst TmpInst; 5434 TmpInst.setOpcode(ARM::tADDhirr); 5435 TmpInst.addOperand(Inst.getOperand(0)); 5436 TmpInst.addOperand(Inst.getOperand(0)); 5437 TmpInst.addOperand(Inst.getOperand(2)); 5438 TmpInst.addOperand(Inst.getOperand(3)); 5439 TmpInst.addOperand(Inst.getOperand(4)); 5440 Inst = TmpInst; 5441 return true; 5442 } 5443 case ARM::tB: 5444 // A Thumb conditional branch outside of an IT block is a tBcc. 5445 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 5446 Inst.setOpcode(ARM::tBcc); 5447 return true; 5448 } 5449 break; 5450 case ARM::t2B: 5451 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 5452 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 5453 Inst.setOpcode(ARM::t2Bcc); 5454 return true; 5455 } 5456 break; 5457 case ARM::t2Bcc: 5458 // If the conditional is AL or we're in an IT block, we really want t2B. 5459 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 5460 Inst.setOpcode(ARM::t2B); 5461 return true; 5462 } 5463 break; 5464 case ARM::tBcc: 5465 // If the conditional is AL, we really want tB. 
5466 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 5467 Inst.setOpcode(ARM::tB); 5468 return true; 5469 } 5470 break; 5471 case ARM::tLDMIA: { 5472 // If the register list contains any high registers, or if the writeback 5473 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 5474 // instead if we're in Thumb2. Otherwise, this should have generated 5475 // an error in validateInstruction(). 5476 unsigned Rn = Inst.getOperand(0).getReg(); 5477 bool hasWritebackToken = 5478 (static_cast<ARMOperand*>(Operands[3])->isToken() && 5479 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 5480 bool listContainsBase; 5481 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 5482 (!listContainsBase && !hasWritebackToken) || 5483 (listContainsBase && hasWritebackToken)) { 5484 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 5485 assert (isThumbTwo()); 5486 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 5487 // If we're switching to the updating version, we need to insert 5488 // the writeback tied operand. 5489 if (hasWritebackToken) 5490 Inst.insert(Inst.begin(), 5491 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 5492 return true; 5493 } 5494 break; 5495 } 5496 case ARM::tSTMIA_UPD: { 5497 // If the register list contains any high registers, we need to use 5498 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 5499 // should have generated an error in validateInstruction(). 5500 unsigned Rn = Inst.getOperand(0).getReg(); 5501 bool listContainsBase; 5502 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 5503 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 5504 assert (isThumbTwo()); 5505 Inst.setOpcode(ARM::t2STMIA_UPD); 5506 return true; 5507 } 5508 break; 5509 } 5510 case ARM::tPOP: { 5511 bool listContainsBase; 5512 // If the register list contains any high registers, we need to use 5513 // the 32-bit encoding instead if we're in Thumb2. 
Otherwise, this 5514 // should have generated an error in validateInstruction(). 5515 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 5516 return false; 5517 assert (isThumbTwo()); 5518 Inst.setOpcode(ARM::t2LDMIA_UPD); 5519 // Add the base register and writeback operands. 5520 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5521 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5522 return true; 5523 } 5524 case ARM::tPUSH: { 5525 bool listContainsBase; 5526 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 5527 return false; 5528 assert (isThumbTwo()); 5529 Inst.setOpcode(ARM::t2STMDB_UPD); 5530 // Add the base register and writeback operands. 5531 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5532 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5533 return true; 5534 } 5535 case ARM::t2MOVi: { 5536 // If we can use the 16-bit encoding and the user didn't explicitly 5537 // request the 32-bit variant, transform it here. 5538 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 5539 Inst.getOperand(1).getImm() <= 255 && 5540 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 5541 Inst.getOperand(4).getReg() == ARM::CPSR) || 5542 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 5543 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 5544 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 5545 // The operands aren't in the same order for tMOVi8... 5546 MCInst TmpInst; 5547 TmpInst.setOpcode(ARM::tMOVi8); 5548 TmpInst.addOperand(Inst.getOperand(0)); 5549 TmpInst.addOperand(Inst.getOperand(4)); 5550 TmpInst.addOperand(Inst.getOperand(1)); 5551 TmpInst.addOperand(Inst.getOperand(2)); 5552 TmpInst.addOperand(Inst.getOperand(3)); 5553 Inst = TmpInst; 5554 return true; 5555 } 5556 break; 5557 } 5558 case ARM::t2MOVr: { 5559 // If we can use the 16-bit encoding and the user didn't explicitly 5560 // request the 32-bit variant, transform it here. 
5561 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 5562 isARMLowRegister(Inst.getOperand(1).getReg()) && 5563 Inst.getOperand(2).getImm() == ARMCC::AL && 5564 Inst.getOperand(4).getReg() == ARM::CPSR && 5565 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 5566 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 5567 // The operands aren't the same for tMOV[S]r... (no cc_out) 5568 MCInst TmpInst; 5569 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 5570 TmpInst.addOperand(Inst.getOperand(0)); 5571 TmpInst.addOperand(Inst.getOperand(1)); 5572 TmpInst.addOperand(Inst.getOperand(2)); 5573 TmpInst.addOperand(Inst.getOperand(3)); 5574 Inst = TmpInst; 5575 return true; 5576 } 5577 break; 5578 } 5579 case ARM::t2SXTH: 5580 case ARM::t2SXTB: 5581 case ARM::t2UXTH: 5582 case ARM::t2UXTB: { 5583 // If we can use the 16-bit encoding and the user didn't explicitly 5584 // request the 32-bit variant, transform it here. 5585 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 5586 isARMLowRegister(Inst.getOperand(1).getReg()) && 5587 Inst.getOperand(2).getImm() == 0 && 5588 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 5589 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 5590 unsigned NewOpc; 5591 switch (Inst.getOpcode()) { 5592 default: llvm_unreachable("Illegal opcode!"); 5593 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 5594 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 5595 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 5596 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 5597 } 5598 // The operands aren't the same for thumb1 (no rotate operand). 
5599 MCInst TmpInst; 5600 TmpInst.setOpcode(NewOpc); 5601 TmpInst.addOperand(Inst.getOperand(0)); 5602 TmpInst.addOperand(Inst.getOperand(1)); 5603 TmpInst.addOperand(Inst.getOperand(3)); 5604 TmpInst.addOperand(Inst.getOperand(4)); 5605 Inst = TmpInst; 5606 return true; 5607 } 5608 break; 5609 } 5610 case ARM::t2IT: { 5611 // The mask bits for all but the first condition are represented as 5612 // the low bit of the condition code value implies 't'. We currently 5613 // always have 1 implies 't', so XOR toggle the bits if the low bit 5614 // of the condition code is zero. The encoding also expects the low 5615 // bit of the condition to be encoded as bit 4 of the mask operand, 5616 // so mask that in if needed 5617 MCOperand &MO = Inst.getOperand(1); 5618 unsigned Mask = MO.getImm(); 5619 unsigned OrigMask = Mask; 5620 unsigned TZ = CountTrailingZeros_32(Mask); 5621 if ((Inst.getOperand(0).getImm() & 1) == 0) { 5622 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 5623 for (unsigned i = 3; i != TZ; --i) 5624 Mask ^= 1 << i; 5625 } else 5626 Mask |= 0x10; 5627 MO.setImm(Mask); 5628 5629 // Set up the IT block state according to the IT instruction we just 5630 // matched. 5631 assert(!inITBlock() && "nested IT blocks?!"); 5632 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 5633 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 5634 ITState.CurPosition = 0; 5635 ITState.FirstCond = true; 5636 break; 5637 } 5638 } 5639 return false; 5640} 5641 5642unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 5643 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 5644 // suffix depending on whether they're in an IT block or not. 
5645 unsigned Opc = Inst.getOpcode(); 5646 const MCInstrDesc &MCID = getInstDesc(Opc); 5647 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 5648 assert(MCID.hasOptionalDef() && 5649 "optionally flag setting instruction missing optional def operand"); 5650 assert(MCID.NumOperands == Inst.getNumOperands() && 5651 "operand count mismatch!"); 5652 // Find the optional-def operand (cc_out). 5653 unsigned OpNo; 5654 for (OpNo = 0; 5655 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 5656 ++OpNo) 5657 ; 5658 // If we're parsing Thumb1, reject it completely. 5659 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 5660 return Match_MnemonicFail; 5661 // If we're parsing Thumb2, which form is legal depends on whether we're 5662 // in an IT block. 5663 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 5664 !inITBlock()) 5665 return Match_RequiresITBlock; 5666 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 5667 inITBlock()) 5668 return Match_RequiresNotITBlock; 5669 } 5670 // Some high-register supporting Thumb1 encodings only allow both registers 5671 // to be from r0-r7 when in Thumb2. 5672 else if (Opc == ARM::tADDhirr && isThumbOne() && 5673 isARMLowRegister(Inst.getOperand(1).getReg()) && 5674 isARMLowRegister(Inst.getOperand(2).getReg())) 5675 return Match_RequiresThumb2; 5676 // Others only require ARMv6 or later. 
5677 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 5678 isARMLowRegister(Inst.getOperand(0).getReg()) && 5679 isARMLowRegister(Inst.getOperand(1).getReg())) 5680 return Match_RequiresV6; 5681 return Match_Success; 5682} 5683 5684bool ARMAsmParser:: 5685MatchAndEmitInstruction(SMLoc IDLoc, 5686 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 5687 MCStreamer &Out) { 5688 MCInst Inst; 5689 unsigned ErrorInfo; 5690 unsigned MatchResult; 5691 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 5692 switch (MatchResult) { 5693 default: break; 5694 case Match_Success: 5695 // Context sensitive operand constraints aren't handled by the matcher, 5696 // so check them here. 5697 if (validateInstruction(Inst, Operands)) { 5698 // Still progress the IT block, otherwise one wrong condition causes 5699 // nasty cascading errors. 5700 forwardITPosition(); 5701 return true; 5702 } 5703 5704 // Some instructions need post-processing to, for example, tweak which 5705 // encoding is selected. Loop on it while changes happen so the 5706 // individual transformations can chain off each other. E.g., 5707 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 5708 while (processInstruction(Inst, Operands)) 5709 ; 5710 5711 // Only move forward at the very end so that everything in validate 5712 // and process gets a consistent answer about whether we're in an IT 5713 // block. 
5714 forwardITPosition(); 5715 5716 Out.EmitInstruction(Inst); 5717 return false; 5718 case Match_MissingFeature: 5719 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 5720 return true; 5721 case Match_InvalidOperand: { 5722 SMLoc ErrorLoc = IDLoc; 5723 if (ErrorInfo != ~0U) { 5724 if (ErrorInfo >= Operands.size()) 5725 return Error(IDLoc, "too few operands for instruction"); 5726 5727 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 5728 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 5729 } 5730 5731 return Error(ErrorLoc, "invalid operand for instruction"); 5732 } 5733 case Match_MnemonicFail: 5734 return Error(IDLoc, "invalid instruction"); 5735 case Match_ConversionFail: 5736 // The converter function will have already emited a diagnostic. 5737 return true; 5738 case Match_RequiresNotITBlock: 5739 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 5740 case Match_RequiresITBlock: 5741 return Error(IDLoc, "instruction only valid inside IT block"); 5742 case Match_RequiresV6: 5743 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 5744 case Match_RequiresThumb2: 5745 return Error(IDLoc, "instruction variant requires Thumb2"); 5746 } 5747 5748 llvm_unreachable("Implement any new match types added!"); 5749 return true; 5750} 5751 5752/// parseDirective parses the arm specific directives 5753bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 5754 StringRef IDVal = DirectiveID.getIdentifier(); 5755 if (IDVal == ".word") 5756 return parseDirectiveWord(4, DirectiveID.getLoc()); 5757 else if (IDVal == ".thumb") 5758 return parseDirectiveThumb(DirectiveID.getLoc()); 5759 else if (IDVal == ".arm") 5760 return parseDirectiveARM(DirectiveID.getLoc()); 5761 else if (IDVal == ".thumb_func") 5762 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 5763 else if (IDVal == ".code") 5764 return parseDirectiveCode(DirectiveID.getLoc()); 5765 else if (IDVal == ".syntax") 5766 return 
parseDirectiveSyntax(DirectiveID.getLoc()); 5767 return true; 5768} 5769 5770/// parseDirectiveWord 5771/// ::= .word [ expression (, expression)* ] 5772bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 5773 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5774 for (;;) { 5775 const MCExpr *Value; 5776 if (getParser().ParseExpression(Value)) 5777 return true; 5778 5779 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 5780 5781 if (getLexer().is(AsmToken::EndOfStatement)) 5782 break; 5783 5784 // FIXME: Improve diagnostic. 5785 if (getLexer().isNot(AsmToken::Comma)) 5786 return Error(L, "unexpected token in directive"); 5787 Parser.Lex(); 5788 } 5789 } 5790 5791 Parser.Lex(); 5792 return false; 5793} 5794 5795/// parseDirectiveThumb 5796/// ::= .thumb 5797bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 5798 if (getLexer().isNot(AsmToken::EndOfStatement)) 5799 return Error(L, "unexpected token in directive"); 5800 Parser.Lex(); 5801 5802 if (!isThumb()) 5803 SwitchMode(); 5804 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5805 return false; 5806} 5807 5808/// parseDirectiveARM 5809/// ::= .arm 5810bool ARMAsmParser::parseDirectiveARM(SMLoc L) { 5811 if (getLexer().isNot(AsmToken::EndOfStatement)) 5812 return Error(L, "unexpected token in directive"); 5813 Parser.Lex(); 5814 5815 if (isThumb()) 5816 SwitchMode(); 5817 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5818 return false; 5819} 5820 5821/// parseDirectiveThumbFunc 5822/// ::= .thumbfunc symbol_name 5823bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 5824 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 5825 bool isMachO = MAI.hasSubsectionsViaSymbols(); 5826 StringRef Name; 5827 5828 // Darwin asm has function name after .thumb_func direction 5829 // ELF doesn't 5830 if (isMachO) { 5831 const AsmToken &Tok = Parser.getTok(); 5832 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 5833 return Error(L, 
"unexpected token in .thumb_func directive"); 5834 Name = Tok.getIdentifier(); 5835 Parser.Lex(); // Consume the identifier token. 5836 } 5837 5838 if (getLexer().isNot(AsmToken::EndOfStatement)) 5839 return Error(L, "unexpected token in directive"); 5840 Parser.Lex(); 5841 5842 // FIXME: assuming function name will be the line following .thumb_func 5843 if (!isMachO) { 5844 Name = Parser.getTok().getIdentifier(); 5845 } 5846 5847 // Mark symbol as a thumb symbol. 5848 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 5849 getParser().getStreamer().EmitThumbFunc(Func); 5850 return false; 5851} 5852 5853/// parseDirectiveSyntax 5854/// ::= .syntax unified | divided 5855bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 5856 const AsmToken &Tok = Parser.getTok(); 5857 if (Tok.isNot(AsmToken::Identifier)) 5858 return Error(L, "unexpected token in .syntax directive"); 5859 StringRef Mode = Tok.getString(); 5860 if (Mode == "unified" || Mode == "UNIFIED") 5861 Parser.Lex(); 5862 else if (Mode == "divided" || Mode == "DIVIDED") 5863 return Error(L, "'.syntax divided' arm asssembly not supported"); 5864 else 5865 return Error(L, "unrecognized syntax mode in .syntax directive"); 5866 5867 if (getLexer().isNot(AsmToken::EndOfStatement)) 5868 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5869 Parser.Lex(); 5870 5871 // TODO tell the MC streamer the mode 5872 // getParser().getStreamer().Emit???(); 5873 return false; 5874} 5875 5876/// parseDirectiveCode 5877/// ::= .code 16 | 32 5878bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 5879 const AsmToken &Tok = Parser.getTok(); 5880 if (Tok.isNot(AsmToken::Integer)) 5881 return Error(L, "unexpected token in .code directive"); 5882 int64_t Val = Parser.getTok().getIntVal(); 5883 if (Val == 16) 5884 Parser.Lex(); 5885 else if (Val == 32) 5886 Parser.Lex(); 5887 else 5888 return Error(L, "invalid operand to .code directive"); 5889 5890 if (getLexer().isNot(AsmToken::EndOfStatement)) 5891 
return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5892 Parser.Lex(); 5893 5894 if (Val == 16) { 5895 if (!isThumb()) 5896 SwitchMode(); 5897 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5898 } else { 5899 if (isThumb()) 5900 SwitchMode(); 5901 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5902 } 5903 5904 return false; 5905} 5906 5907extern "C" void LLVMInitializeARMAsmLexer(); 5908 5909/// Force static initialization. 5910extern "C" void LLVMInitializeARMAsmParser() { 5911 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 5912 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 5913 LLVMInitializeARMAsmLexer(); 5914} 5915 5916#define GET_REGISTER_MATCHER 5917#define GET_MATCHER_IMPLEMENTATION 5918#include "ARMGenAsmMatcher.inc" 5919