ARMAsmParser.cpp revision ca3cd419a52c1dedee133d79772ef97f30e5d20b
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

// Kinds of NEON vector-lane syntax attached to a register list:
// none, "all lanes" ([]), or a specific indexed lane ([n]).
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

/// ARMAsmParser - Target assembly parser for ARM and Thumb. Parses
/// instructions and target directives into MCInsts via the generic
/// MCTargetAsmParser interface.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;
  const MCRegisterInfo *MRI;   // Cached from the MCContext in the constructor.

  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // Parser-side state machine for Thumb IT (If-Then) blocks. Tracks the
  // condition/mask from the most recent IT instruction and how far through
  // the block parsing has progressed.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // first instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True while instructions are being parsed inside an active IT block.
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics are forwarded to the generic assembly parser so they carry
  // proper source locations and ranges.
  bool Warning(SMLoc L, const Twine &Msg,
               ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
    return Parser.Warning(L, Msg, Ranges);
  }
  bool Error(SMLoc L, const Twine &Msg,
             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
    return Parser.Error(L, Msg, Ranges);
  }

  // Low-level operand parsing helpers. The tryParse* forms return failure
  // without emitting a diagnostic so the caller can try alternatives.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);

  // Handlers for target-specific assembler directives.
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into the base mnemonic and its
  // predication/carry/IF-mod/IT-mask suffixes.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries, derived from STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode and recompute the set of available
  // match features accordingly.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked through the tablegen'd operand-match
  // tables above.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match hooks: validate a matched instruction and apply any
  // target-specific transformations before emission.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match-failure codes, starting after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Cache the MCRegisterInfo.
    MRI = &getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
274class ARMOperand : public MCParsedAsmOperand { 275 enum KindTy { 276 k_CondCode, 277 k_CCOut, 278 k_ITCondMask, 279 k_CoprocNum, 280 k_CoprocReg, 281 k_CoprocOption, 282 k_Immediate, 283 k_MemBarrierOpt, 284 k_Memory, 285 k_PostIndexRegister, 286 k_MSRMask, 287 k_ProcIFlags, 288 k_VectorIndex, 289 k_Register, 290 k_RegisterList, 291 k_DPRRegisterList, 292 k_SPRRegisterList, 293 k_VectorList, 294 k_VectorListAllLanes, 295 k_VectorListIndexed, 296 k_ShiftedRegister, 297 k_ShiftedImmediate, 298 k_ShifterImmediate, 299 k_RotateImmediate, 300 k_BitfieldDescriptor, 301 k_Token 302 } Kind; 303 304 SMLoc StartLoc, EndLoc; 305 SmallVector<unsigned, 8> Registers; 306 307 union { 308 struct { 309 ARMCC::CondCodes Val; 310 } CC; 311 312 struct { 313 unsigned Val; 314 } Cop; 315 316 struct { 317 unsigned Val; 318 } CoprocOption; 319 320 struct { 321 unsigned Mask:4; 322 } ITMask; 323 324 struct { 325 ARM_MB::MemBOpt Val; 326 } MBOpt; 327 328 struct { 329 ARM_PROC::IFlags Val; 330 } IFlags; 331 332 struct { 333 unsigned Val; 334 } MMask; 335 336 struct { 337 const char *Data; 338 unsigned Length; 339 } Tok; 340 341 struct { 342 unsigned RegNum; 343 } Reg; 344 345 // A vector register list is a sequential list of 1 to 4 registers. 346 struct { 347 unsigned RegNum; 348 unsigned Count; 349 unsigned LaneIndex; 350 bool isDoubleSpaced; 351 } VectorList; 352 353 struct { 354 unsigned Val; 355 } VectorIndex; 356 357 struct { 358 const MCExpr *Val; 359 } Imm; 360 361 /// Combined record for all forms of ARM address expressions. 362 struct { 363 unsigned BaseRegNum; 364 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 365 // was specified. 366 const MCConstantExpr *OffsetImm; // Offset immediate value 367 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 368 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 369 unsigned ShiftImm; // shift for OffsetReg. 
370 unsigned Alignment; // 0 = no alignment specified 371 // n = alignment in bytes (2, 4, 8, 16, or 32) 372 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 373 } Memory; 374 375 struct { 376 unsigned RegNum; 377 bool isAdd; 378 ARM_AM::ShiftOpc ShiftTy; 379 unsigned ShiftImm; 380 } PostIdxReg; 381 382 struct { 383 bool isASR; 384 unsigned Imm; 385 } ShifterImm; 386 struct { 387 ARM_AM::ShiftOpc ShiftTy; 388 unsigned SrcReg; 389 unsigned ShiftReg; 390 unsigned ShiftImm; 391 } RegShiftedReg; 392 struct { 393 ARM_AM::ShiftOpc ShiftTy; 394 unsigned SrcReg; 395 unsigned ShiftImm; 396 } RegShiftedImm; 397 struct { 398 unsigned Imm; 399 } RotImm; 400 struct { 401 unsigned LSB; 402 unsigned Width; 403 } Bitfield; 404 }; 405 406 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 407public: 408 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 409 Kind = o.Kind; 410 StartLoc = o.StartLoc; 411 EndLoc = o.EndLoc; 412 switch (Kind) { 413 case k_CondCode: 414 CC = o.CC; 415 break; 416 case k_ITCondMask: 417 ITMask = o.ITMask; 418 break; 419 case k_Token: 420 Tok = o.Tok; 421 break; 422 case k_CCOut: 423 case k_Register: 424 Reg = o.Reg; 425 break; 426 case k_RegisterList: 427 case k_DPRRegisterList: 428 case k_SPRRegisterList: 429 Registers = o.Registers; 430 break; 431 case k_VectorList: 432 case k_VectorListAllLanes: 433 case k_VectorListIndexed: 434 VectorList = o.VectorList; 435 break; 436 case k_CoprocNum: 437 case k_CoprocReg: 438 Cop = o.Cop; 439 break; 440 case k_CoprocOption: 441 CoprocOption = o.CoprocOption; 442 break; 443 case k_Immediate: 444 Imm = o.Imm; 445 break; 446 case k_MemBarrierOpt: 447 MBOpt = o.MBOpt; 448 break; 449 case k_Memory: 450 Memory = o.Memory; 451 break; 452 case k_PostIndexRegister: 453 PostIdxReg = o.PostIdxReg; 454 break; 455 case k_MSRMask: 456 MMask = o.MMask; 457 break; 458 case k_ProcIFlags: 459 IFlags = o.IFlags; 460 break; 461 case k_ShifterImmediate: 462 ShifterImm = o.ShifterImm; 463 break; 464 case 
k_ShiftedRegister: 465 RegShiftedReg = o.RegShiftedReg; 466 break; 467 case k_ShiftedImmediate: 468 RegShiftedImm = o.RegShiftedImm; 469 break; 470 case k_RotateImmediate: 471 RotImm = o.RotImm; 472 break; 473 case k_BitfieldDescriptor: 474 Bitfield = o.Bitfield; 475 break; 476 case k_VectorIndex: 477 VectorIndex = o.VectorIndex; 478 break; 479 } 480 } 481 482 /// getStartLoc - Get the location of the first token of this operand. 483 SMLoc getStartLoc() const { return StartLoc; } 484 /// getEndLoc - Get the location of the last token of this operand. 485 SMLoc getEndLoc() const { return EndLoc; } 486 487 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } 488 489 ARMCC::CondCodes getCondCode() const { 490 assert(Kind == k_CondCode && "Invalid access!"); 491 return CC.Val; 492 } 493 494 unsigned getCoproc() const { 495 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 496 return Cop.Val; 497 } 498 499 StringRef getToken() const { 500 assert(Kind == k_Token && "Invalid access!"); 501 return StringRef(Tok.Data, Tok.Length); 502 } 503 504 unsigned getReg() const { 505 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 506 return Reg.RegNum; 507 } 508 509 const SmallVectorImpl<unsigned> &getRegList() const { 510 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 511 Kind == k_SPRRegisterList) && "Invalid access!"); 512 return Registers; 513 } 514 515 const MCExpr *getImm() const { 516 assert(isImm() && "Invalid access!"); 517 return Imm.Val; 518 } 519 520 unsigned getVectorIndex() const { 521 assert(Kind == k_VectorIndex && "Invalid access!"); 522 return VectorIndex.Val; 523 } 524 525 ARM_MB::MemBOpt getMemBarrierOpt() const { 526 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 527 return MBOpt.Val; 528 } 529 530 ARM_PROC::IFlags getProcIFlags() const { 531 assert(Kind == k_ProcIFlags && "Invalid access!"); 532 return IFlags.Val; 533 } 534 535 unsigned getMSRMask() const { 536 assert(Kind == 
k_MSRMask && "Invalid access!"); 537 return MMask.Val; 538 } 539 540 bool isCoprocNum() const { return Kind == k_CoprocNum; } 541 bool isCoprocReg() const { return Kind == k_CoprocReg; } 542 bool isCoprocOption() const { return Kind == k_CoprocOption; } 543 bool isCondCode() const { return Kind == k_CondCode; } 544 bool isCCOut() const { return Kind == k_CCOut; } 545 bool isITMask() const { return Kind == k_ITCondMask; } 546 bool isITCondCode() const { return Kind == k_CondCode; } 547 bool isImm() const { return Kind == k_Immediate; } 548 bool isFPImm() const { 549 if (!isImm()) return false; 550 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 551 if (!CE) return false; 552 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); 553 return Val != -1; 554 } 555 bool isFBits16() const { 556 if (!isImm()) return false; 557 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 558 if (!CE) return false; 559 int64_t Value = CE->getValue(); 560 return Value >= 0 && Value <= 16; 561 } 562 bool isFBits32() const { 563 if (!isImm()) return false; 564 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 565 if (!CE) return false; 566 int64_t Value = CE->getValue(); 567 return Value >= 1 && Value <= 32; 568 } 569 bool isImm8s4() const { 570 if (!isImm()) return false; 571 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 572 if (!CE) return false; 573 int64_t Value = CE->getValue(); 574 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 575 } 576 bool isImm0_1020s4() const { 577 if (!isImm()) return false; 578 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 579 if (!CE) return false; 580 int64_t Value = CE->getValue(); 581 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 582 } 583 bool isImm0_508s4() const { 584 if (!isImm()) return false; 585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 586 if (!CE) return false; 587 int64_t Value = CE->getValue(); 588 return ((Value & 3) == 
0) && Value >= 0 && Value <= 508; 589 } 590 bool isImm0_508s4Neg() const { 591 if (!isImm()) return false; 592 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 593 if (!CE) return false; 594 int64_t Value = -CE->getValue(); 595 // explicitly exclude zero. we want that to use the normal 0_508 version. 596 return ((Value & 3) == 0) && Value > 0 && Value <= 508; 597 } 598 bool isImm0_255() const { 599 if (!isImm()) return false; 600 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 601 if (!CE) return false; 602 int64_t Value = CE->getValue(); 603 return Value >= 0 && Value < 256; 604 } 605 bool isImm0_4095() const { 606 if (!isImm()) return false; 607 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 608 if (!CE) return false; 609 int64_t Value = CE->getValue(); 610 return Value >= 0 && Value < 4096; 611 } 612 bool isImm0_4095Neg() const { 613 if (!isImm()) return false; 614 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 615 if (!CE) return false; 616 int64_t Value = -CE->getValue(); 617 return Value > 0 && Value < 4096; 618 } 619 bool isImm0_1() const { 620 if (!isImm()) return false; 621 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 622 if (!CE) return false; 623 int64_t Value = CE->getValue(); 624 return Value >= 0 && Value < 2; 625 } 626 bool isImm0_3() const { 627 if (!isImm()) return false; 628 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 629 if (!CE) return false; 630 int64_t Value = CE->getValue(); 631 return Value >= 0 && Value < 4; 632 } 633 bool isImm0_7() const { 634 if (!isImm()) return false; 635 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 636 if (!CE) return false; 637 int64_t Value = CE->getValue(); 638 return Value >= 0 && Value < 8; 639 } 640 bool isImm0_15() const { 641 if (!isImm()) return false; 642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 643 if (!CE) return false; 644 int64_t Value = CE->getValue(); 645 return 
Value >= 0 && Value < 16; 646 } 647 bool isImm0_31() const { 648 if (!isImm()) return false; 649 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 650 if (!CE) return false; 651 int64_t Value = CE->getValue(); 652 return Value >= 0 && Value < 32; 653 } 654 bool isImm0_63() const { 655 if (!isImm()) return false; 656 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 657 if (!CE) return false; 658 int64_t Value = CE->getValue(); 659 return Value >= 0 && Value < 64; 660 } 661 bool isImm8() const { 662 if (!isImm()) return false; 663 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 664 if (!CE) return false; 665 int64_t Value = CE->getValue(); 666 return Value == 8; 667 } 668 bool isImm16() const { 669 if (!isImm()) return false; 670 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 671 if (!CE) return false; 672 int64_t Value = CE->getValue(); 673 return Value == 16; 674 } 675 bool isImm32() const { 676 if (!isImm()) return false; 677 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 678 if (!CE) return false; 679 int64_t Value = CE->getValue(); 680 return Value == 32; 681 } 682 bool isShrImm8() const { 683 if (!isImm()) return false; 684 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 685 if (!CE) return false; 686 int64_t Value = CE->getValue(); 687 return Value > 0 && Value <= 8; 688 } 689 bool isShrImm16() const { 690 if (!isImm()) return false; 691 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 692 if (!CE) return false; 693 int64_t Value = CE->getValue(); 694 return Value > 0 && Value <= 16; 695 } 696 bool isShrImm32() const { 697 if (!isImm()) return false; 698 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 699 if (!CE) return false; 700 int64_t Value = CE->getValue(); 701 return Value > 0 && Value <= 32; 702 } 703 bool isShrImm64() const { 704 if (!isImm()) return false; 705 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 706 if (!CE) 
return false; 707 int64_t Value = CE->getValue(); 708 return Value > 0 && Value <= 64; 709 } 710 bool isImm1_7() const { 711 if (!isImm()) return false; 712 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 713 if (!CE) return false; 714 int64_t Value = CE->getValue(); 715 return Value > 0 && Value < 8; 716 } 717 bool isImm1_15() const { 718 if (!isImm()) return false; 719 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 720 if (!CE) return false; 721 int64_t Value = CE->getValue(); 722 return Value > 0 && Value < 16; 723 } 724 bool isImm1_31() const { 725 if (!isImm()) return false; 726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 727 if (!CE) return false; 728 int64_t Value = CE->getValue(); 729 return Value > 0 && Value < 32; 730 } 731 bool isImm1_16() const { 732 if (!isImm()) return false; 733 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 734 if (!CE) return false; 735 int64_t Value = CE->getValue(); 736 return Value > 0 && Value < 17; 737 } 738 bool isImm1_32() const { 739 if (!isImm()) return false; 740 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 741 if (!CE) return false; 742 int64_t Value = CE->getValue(); 743 return Value > 0 && Value < 33; 744 } 745 bool isImm0_32() const { 746 if (!isImm()) return false; 747 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 748 if (!CE) return false; 749 int64_t Value = CE->getValue(); 750 return Value >= 0 && Value < 33; 751 } 752 bool isImm0_65535() const { 753 if (!isImm()) return false; 754 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 755 if (!CE) return false; 756 int64_t Value = CE->getValue(); 757 return Value >= 0 && Value < 65536; 758 } 759 bool isImm0_65535Expr() const { 760 if (!isImm()) return false; 761 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 762 // If it's not a constant expression, it'll generate a fixup and be 763 // handled later. 
764 if (!CE) return true; 765 int64_t Value = CE->getValue(); 766 return Value >= 0 && Value < 65536; 767 } 768 bool isImm24bit() const { 769 if (!isImm()) return false; 770 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 771 if (!CE) return false; 772 int64_t Value = CE->getValue(); 773 return Value >= 0 && Value <= 0xffffff; 774 } 775 bool isImmThumbSR() const { 776 if (!isImm()) return false; 777 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 778 if (!CE) return false; 779 int64_t Value = CE->getValue(); 780 return Value > 0 && Value < 33; 781 } 782 bool isPKHLSLImm() const { 783 if (!isImm()) return false; 784 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 785 if (!CE) return false; 786 int64_t Value = CE->getValue(); 787 return Value >= 0 && Value < 32; 788 } 789 bool isPKHASRImm() const { 790 if (!isImm()) return false; 791 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 792 if (!CE) return false; 793 int64_t Value = CE->getValue(); 794 return Value > 0 && Value <= 32; 795 } 796 bool isARMSOImm() const { 797 if (!isImm()) return false; 798 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 799 if (!CE) return false; 800 int64_t Value = CE->getValue(); 801 return ARM_AM::getSOImmVal(Value) != -1; 802 } 803 bool isARMSOImmNot() const { 804 if (!isImm()) return false; 805 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 806 if (!CE) return false; 807 int64_t Value = CE->getValue(); 808 return ARM_AM::getSOImmVal(~Value) != -1; 809 } 810 bool isARMSOImmNeg() const { 811 if (!isImm()) return false; 812 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 813 if (!CE) return false; 814 int64_t Value = CE->getValue(); 815 // Only use this when not representable as a plain so_imm. 
816 return ARM_AM::getSOImmVal(Value) == -1 && 817 ARM_AM::getSOImmVal(-Value) != -1; 818 } 819 bool isT2SOImm() const { 820 if (!isImm()) return false; 821 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 822 if (!CE) return false; 823 int64_t Value = CE->getValue(); 824 return ARM_AM::getT2SOImmVal(Value) != -1; 825 } 826 bool isT2SOImmNot() const { 827 if (!isImm()) return false; 828 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 829 if (!CE) return false; 830 int64_t Value = CE->getValue(); 831 return ARM_AM::getT2SOImmVal(~Value) != -1; 832 } 833 bool isT2SOImmNeg() const { 834 if (!isImm()) return false; 835 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 836 if (!CE) return false; 837 int64_t Value = CE->getValue(); 838 // Only use this when not representable as a plain so_imm. 839 return ARM_AM::getT2SOImmVal(Value) == -1 && 840 ARM_AM::getT2SOImmVal(-Value) != -1; 841 } 842 bool isSetEndImm() const { 843 if (!isImm()) return false; 844 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 845 if (!CE) return false; 846 int64_t Value = CE->getValue(); 847 return Value == 1 || Value == 0; 848 } 849 bool isReg() const { return Kind == k_Register; } 850 bool isRegList() const { return Kind == k_RegisterList; } 851 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 852 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 853 bool isToken() const { return Kind == k_Token; } 854 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 855 bool isMemory() const { return Kind == k_Memory; } 856 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 857 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 858 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 859 bool isRotImm() const { return Kind == k_RotateImmediate; } 860 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 861 bool isPostIdxRegShifted() const { return Kind 
== k_PostIndexRegister; } 862 bool isPostIdxReg() const { 863 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 864 } 865 bool isMemNoOffset(bool alignOK = false) const { 866 if (!isMemory()) 867 return false; 868 // No offset of any kind. 869 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 870 (alignOK || Memory.Alignment == 0); 871 } 872 bool isMemPCRelImm12() const { 873 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 874 return false; 875 // Base register must be PC. 876 if (Memory.BaseRegNum != ARM::PC) 877 return false; 878 // Immediate offset in range [-4095, 4095]. 879 if (!Memory.OffsetImm) return true; 880 int64_t Val = Memory.OffsetImm->getValue(); 881 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 882 } 883 bool isAlignedMemory() const { 884 return isMemNoOffset(true); 885 } 886 bool isAddrMode2() const { 887 if (!isMemory() || Memory.Alignment != 0) return false; 888 // Check for register offset. 889 if (Memory.OffsetRegNum) return true; 890 // Immediate offset in range [-4095, 4095]. 891 if (!Memory.OffsetImm) return true; 892 int64_t Val = Memory.OffsetImm->getValue(); 893 return Val > -4096 && Val < 4096; 894 } 895 bool isAM2OffsetImm() const { 896 if (!isImm()) return false; 897 // Immediate offset in range [-4095, 4095]. 898 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 899 if (!CE) return false; 900 int64_t Val = CE->getValue(); 901 return Val > -4096 && Val < 4096; 902 } 903 bool isAddrMode3() const { 904 // If we have an immediate that's not a constant, treat it as a label 905 // reference needing a fixup. If it is a constant, it's something else 906 // and we reject it. 907 if (isImm() && !isa<MCConstantExpr>(getImm())) 908 return true; 909 if (!isMemory() || Memory.Alignment != 0) return false; 910 // No shifts are legal for AM3. 911 if (Memory.ShiftType != ARM_AM::no_shift) return false; 912 // Check for register offset. 
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // The #-0 offset is encoded as INT32_MIN, and we have to check
    // for this too.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  // True if this operand is usable as the offset of an addrmode3
  // post-indexed access: either a plain (unshifted) post-index register,
  // or a constant immediate in [-255, 255] (with #-0 encoded as INT32_MIN).
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  // TBB takes a plain [Rn, Rm] memory operand: no shift, no negation and
  // no alignment qualifier.
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  // TBH requires [Rn, Rm, lsl #1]; the index register must be shifted left
  // by exactly one.
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both base and offset must be low registers for the 16-bit encoding.
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  // SP-relative Thumb load/store: base must be SP, immediate offset only.
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255], with #-0 as INT32_MIN.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1]. Unlike the predicates above, a
    // missing offset is rejected here: a zero offset isn't negative.
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095], with #-0 as INT32_MIN.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  // Post-indexed constant offset in [-255, 255] (with #-0 as INT32_MIN).
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  // Post-indexed constant offset, multiple of 4 in [-1020, 1020].
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
1113 bool isSingleSpacedVectorList() const { 1114 return Kind == k_VectorList && !VectorList.isDoubleSpaced; 1115 } 1116 bool isDoubleSpacedVectorList() const { 1117 return Kind == k_VectorList && VectorList.isDoubleSpaced; 1118 } 1119 bool isVecListOneD() const { 1120 if (!isSingleSpacedVectorList()) return false; 1121 return VectorList.Count == 1; 1122 } 1123 1124 bool isVecListDPair() const { 1125 if (!isSingleSpacedVectorList()) return false; 1126 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 1127 .contains(VectorList.RegNum)); 1128 } 1129 1130 bool isVecListThreeD() const { 1131 if (!isSingleSpacedVectorList()) return false; 1132 return VectorList.Count == 3; 1133 } 1134 1135 bool isVecListFourD() const { 1136 if (!isSingleSpacedVectorList()) return false; 1137 return VectorList.Count == 4; 1138 } 1139 1140 bool isVecListDPairSpaced() const { 1141 if (isSingleSpacedVectorList()) return false; 1142 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID] 1143 .contains(VectorList.RegNum)); 1144 } 1145 1146 bool isVecListThreeQ() const { 1147 if (!isDoubleSpacedVectorList()) return false; 1148 return VectorList.Count == 3; 1149 } 1150 1151 bool isVecListFourQ() const { 1152 if (!isDoubleSpacedVectorList()) return false; 1153 return VectorList.Count == 4; 1154 } 1155 1156 bool isSingleSpacedVectorAllLanes() const { 1157 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced; 1158 } 1159 bool isDoubleSpacedVectorAllLanes() const { 1160 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced; 1161 } 1162 bool isVecListOneDAllLanes() const { 1163 if (!isSingleSpacedVectorAllLanes()) return false; 1164 return VectorList.Count == 1; 1165 } 1166 1167 bool isVecListDPairAllLanes() const { 1168 if (!isSingleSpacedVectorAllLanes()) return false; 1169 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 1170 .contains(VectorList.RegNum)); 1171 } 1172 1173 bool isVecListDPairSpacedAllLanes() const { 1174 if (!isDoubleSpacedVectorAllLanes()) return false; 
1175 return VectorList.Count == 2; 1176 } 1177 1178 bool isVecListThreeDAllLanes() const { 1179 if (!isSingleSpacedVectorAllLanes()) return false; 1180 return VectorList.Count == 3; 1181 } 1182 1183 bool isVecListThreeQAllLanes() const { 1184 if (!isDoubleSpacedVectorAllLanes()) return false; 1185 return VectorList.Count == 3; 1186 } 1187 1188 bool isVecListFourDAllLanes() const { 1189 if (!isSingleSpacedVectorAllLanes()) return false; 1190 return VectorList.Count == 4; 1191 } 1192 1193 bool isVecListFourQAllLanes() const { 1194 if (!isDoubleSpacedVectorAllLanes()) return false; 1195 return VectorList.Count == 4; 1196 } 1197 1198 bool isSingleSpacedVectorIndexed() const { 1199 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced; 1200 } 1201 bool isDoubleSpacedVectorIndexed() const { 1202 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced; 1203 } 1204 bool isVecListOneDByteIndexed() const { 1205 if (!isSingleSpacedVectorIndexed()) return false; 1206 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 1207 } 1208 1209 bool isVecListOneDHWordIndexed() const { 1210 if (!isSingleSpacedVectorIndexed()) return false; 1211 return VectorList.Count == 1 && VectorList.LaneIndex <= 3; 1212 } 1213 1214 bool isVecListOneDWordIndexed() const { 1215 if (!isSingleSpacedVectorIndexed()) return false; 1216 return VectorList.Count == 1 && VectorList.LaneIndex <= 1; 1217 } 1218 1219 bool isVecListTwoDByteIndexed() const { 1220 if (!isSingleSpacedVectorIndexed()) return false; 1221 return VectorList.Count == 2 && VectorList.LaneIndex <= 7; 1222 } 1223 1224 bool isVecListTwoDHWordIndexed() const { 1225 if (!isSingleSpacedVectorIndexed()) return false; 1226 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 1227 } 1228 1229 bool isVecListTwoQWordIndexed() const { 1230 if (!isDoubleSpacedVectorIndexed()) return false; 1231 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 1232 } 1233 1234 bool isVecListTwoQHWordIndexed() const { 1235 if 
(!isDoubleSpacedVectorIndexed()) return false; 1236 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 1237 } 1238 1239 bool isVecListTwoDWordIndexed() const { 1240 if (!isSingleSpacedVectorIndexed()) return false; 1241 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 1242 } 1243 1244 bool isVecListThreeDByteIndexed() const { 1245 if (!isSingleSpacedVectorIndexed()) return false; 1246 return VectorList.Count == 3 && VectorList.LaneIndex <= 7; 1247 } 1248 1249 bool isVecListThreeDHWordIndexed() const { 1250 if (!isSingleSpacedVectorIndexed()) return false; 1251 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 1252 } 1253 1254 bool isVecListThreeQWordIndexed() const { 1255 if (!isDoubleSpacedVectorIndexed()) return false; 1256 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 1257 } 1258 1259 bool isVecListThreeQHWordIndexed() const { 1260 if (!isDoubleSpacedVectorIndexed()) return false; 1261 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 1262 } 1263 1264 bool isVecListThreeDWordIndexed() const { 1265 if (!isSingleSpacedVectorIndexed()) return false; 1266 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 1267 } 1268 1269 bool isVecListFourDByteIndexed() const { 1270 if (!isSingleSpacedVectorIndexed()) return false; 1271 return VectorList.Count == 4 && VectorList.LaneIndex <= 7; 1272 } 1273 1274 bool isVecListFourDHWordIndexed() const { 1275 if (!isSingleSpacedVectorIndexed()) return false; 1276 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 1277 } 1278 1279 bool isVecListFourQWordIndexed() const { 1280 if (!isDoubleSpacedVectorIndexed()) return false; 1281 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 1282 } 1283 1284 bool isVecListFourQHWordIndexed() const { 1285 if (!isDoubleSpacedVectorIndexed()) return false; 1286 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 1287 } 1288 1289 bool isVecListFourDWordIndexed() const { 1290 if (!isSingleSpacedVectorIndexed()) return 
false; 1291 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 1292 } 1293 1294 bool isVectorIndex8() const { 1295 if (Kind != k_VectorIndex) return false; 1296 return VectorIndex.Val < 8; 1297 } 1298 bool isVectorIndex16() const { 1299 if (Kind != k_VectorIndex) return false; 1300 return VectorIndex.Val < 4; 1301 } 1302 bool isVectorIndex32() const { 1303 if (Kind != k_VectorIndex) return false; 1304 return VectorIndex.Val < 2; 1305 } 1306 1307 bool isNEONi8splat() const { 1308 if (!isImm()) return false; 1309 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1310 // Must be a constant. 1311 if (!CE) return false; 1312 int64_t Value = CE->getValue(); 1313 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 1314 // value. 1315 return Value >= 0 && Value < 256; 1316 } 1317 1318 bool isNEONi16splat() const { 1319 if (!isImm()) return false; 1320 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1321 // Must be a constant. 1322 if (!CE) return false; 1323 int64_t Value = CE->getValue(); 1324 // i16 value in the range [0,255] or [0x0100, 0xff00] 1325 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1326 } 1327 1328 bool isNEONi32splat() const { 1329 if (!isImm()) return false; 1330 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1331 // Must be a constant. 1332 if (!CE) return false; 1333 int64_t Value = CE->getValue(); 1334 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 1335 return (Value >= 0 && Value < 256) || 1336 (Value >= 0x0100 && Value <= 0xff00) || 1337 (Value >= 0x010000 && Value <= 0xff0000) || 1338 (Value >= 0x01000000 && Value <= 0xff000000); 1339 } 1340 1341 bool isNEONi32vmov() const { 1342 if (!isImm()) return false; 1343 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1344 // Must be a constant. 
1345 if (!CE) return false; 1346 int64_t Value = CE->getValue(); 1347 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1348 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1349 return (Value >= 0 && Value < 256) || 1350 (Value >= 0x0100 && Value <= 0xff00) || 1351 (Value >= 0x010000 && Value <= 0xff0000) || 1352 (Value >= 0x01000000 && Value <= 0xff000000) || 1353 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1354 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1355 } 1356 bool isNEONi32vmovNeg() const { 1357 if (!isImm()) return false; 1358 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1359 // Must be a constant. 1360 if (!CE) return false; 1361 int64_t Value = ~CE->getValue(); 1362 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1363 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1364 return (Value >= 0 && Value < 256) || 1365 (Value >= 0x0100 && Value <= 0xff00) || 1366 (Value >= 0x010000 && Value <= 0xff0000) || 1367 (Value >= 0x01000000 && Value <= 0xff000000) || 1368 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1369 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1370 } 1371 1372 bool isNEONi64splat() const { 1373 if (!isImm()) return false; 1374 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1375 // Must be a constant. 1376 if (!CE) return false; 1377 uint64_t Value = CE->getValue(); 1378 // i64 value with each byte being either 0 or 0xff. 1379 for (unsigned i = 0; i < 8; ++i) 1380 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1381 return true; 1382 } 1383 1384 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1385 // Add as immediates when possible. Null MCExpr = 0. 
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  // A condition code is two MCInst operands: the ARMCC value and the CPSR
  // register (or reg0 when the condition is AL, i.e. unpredicated).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register-shifted register: source reg, shift reg, and the packed
  // shift-opcode/amount immediate.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    // Shift of #32 is encoded as 0 where permitted
    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
  }

  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Bit 5 distinguishes asr from lsl; low bits carry the amount.
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  // The fbits operand encodes as (size - fbits). The dyn_cast results below
  // are used unchecked; presumably the isFBits* predicates have already
  // guaranteed a constant — TODO confirm.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
  }

  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    int32_t Imm = Memory.OffsetImm->getValue();
    // FIXME: Handle #-0
    if (Imm == INT32_MIN) Imm = 0;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  // Addrmode2: base reg, offset reg (or reg0), and the packed AM2 opcode
  // immediate (add/sub flag, offset or shift amount, shift type).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: parsed as INT32_MIN, encoded as sub #0.
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Immediate post-index offset for addrmode2: reg0 plus the packed AM2
  // opcode (sign folded into the add/sub flag).
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-index offset for addrmode3: either a post-index register or a
  // constant immediate (see isAM3Offset).
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // The Thumb RIs4/RIs2/RIs1/SPI forms store the offset pre-scaled by the
  // access size (4, 2, 1 and 4 bytes respectively).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // #-0 (INT32_MIN) becomes magnitude 0 with the add bit clear.
    if (Imm == INT32_MIN) Imm = 0;
    // Magnitude in the low 8 bits, add/sub flag in bit 8.
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ?
-Imm : Imm) / 4) | (int)isAdd << 8; 1909 Inst.addOperand(MCOperand::CreateImm(Imm)); 1910 } 1911 1912 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1913 assert(N == 2 && "Invalid number of operands!"); 1914 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1915 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1916 } 1917 1918 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1919 assert(N == 2 && "Invalid number of operands!"); 1920 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1921 // The sign, shift type, and shift amount are encoded in a single operand 1922 // using the AM2 encoding helpers. 1923 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1924 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1925 PostIdxReg.ShiftTy); 1926 Inst.addOperand(MCOperand::CreateImm(Imm)); 1927 } 1928 1929 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1930 assert(N == 1 && "Invalid number of operands!"); 1931 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1932 } 1933 1934 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1935 assert(N == 1 && "Invalid number of operands!"); 1936 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1937 } 1938 1939 void addVecListOperands(MCInst &Inst, unsigned N) const { 1940 assert(N == 1 && "Invalid number of operands!"); 1941 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1942 } 1943 1944 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const { 1945 assert(N == 2 && "Invalid number of operands!"); 1946 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1947 Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex)); 1948 } 1949 1950 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1951 assert(N == 1 && "Invalid number of operands!"); 1952 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1953 } 1954 1955 void addVectorIndex16Operands(MCInst &Inst, unsigned 
N) const { 1956 assert(N == 1 && "Invalid number of operands!"); 1957 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1958 } 1959 1960 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1961 assert(N == 1 && "Invalid number of operands!"); 1962 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1963 } 1964 1965 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1966 assert(N == 1 && "Invalid number of operands!"); 1967 // The immediate encodes the type of constant as well as the value. 1968 // Mask in that this is an i8 splat. 1969 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1970 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1971 } 1972 1973 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1974 assert(N == 1 && "Invalid number of operands!"); 1975 // The immediate encodes the type of constant as well as the value. 1976 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1977 unsigned Value = CE->getValue(); 1978 if (Value >= 256) 1979 Value = (Value >> 8) | 0xa00; 1980 else 1981 Value |= 0x800; 1982 Inst.addOperand(MCOperand::CreateImm(Value)); 1983 } 1984 1985 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1986 assert(N == 1 && "Invalid number of operands!"); 1987 // The immediate encodes the type of constant as well as the value. 1988 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1989 unsigned Value = CE->getValue(); 1990 if (Value >= 256 && Value <= 0xff00) 1991 Value = (Value >> 8) | 0x200; 1992 else if (Value > 0xffff && Value <= 0xff0000) 1993 Value = (Value >> 16) | 0x400; 1994 else if (Value > 0xffffff) 1995 Value = (Value >> 24) | 0x600; 1996 Inst.addOperand(MCOperand::CreateImm(Value)); 1997 } 1998 1999 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 2000 assert(N == 1 && "Invalid number of operands!"); 2001 // The immediate encodes the type of constant as well as the value. 
2002 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 2003 unsigned Value = CE->getValue(); 2004 if (Value >= 256 && Value <= 0xffff) 2005 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 2006 else if (Value > 0xffff && Value <= 0xffffff) 2007 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 2008 else if (Value > 0xffffff) 2009 Value = (Value >> 24) | 0x600; 2010 Inst.addOperand(MCOperand::CreateImm(Value)); 2011 } 2012 2013 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const { 2014 assert(N == 1 && "Invalid number of operands!"); 2015 // The immediate encodes the type of constant as well as the value. 2016 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 2017 unsigned Value = ~CE->getValue(); 2018 if (Value >= 256 && Value <= 0xffff) 2019 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 2020 else if (Value > 0xffff && Value <= 0xffffff) 2021 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 2022 else if (Value > 0xffffff) 2023 Value = (Value >> 24) | 0x600; 2024 Inst.addOperand(MCOperand::CreateImm(Value)); 2025 } 2026 2027 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 2028 assert(N == 1 && "Invalid number of operands!"); 2029 // The immediate encodes the type of constant as well as the value. 
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    // Collapse the value to one bit per byte: bit i of Imm is the low bit
    // of byte i of the 64-bit value.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory methods: each allocates an ARMOperand of the appropriate kind
  // and records the source range for diagnostics.

  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Note: the token operand aliases Str's storage; it does not copy.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    // Pick the most specific list kind based on the class of the first
    // register in the list.
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    // Keep the stored list sorted by register enum value.
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
                                              bool isDoubleSpaced,
                                              SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
                                             unsigned Index,
                                             bool isDoubleSpaced,
                                             SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Ctx is currently unused.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
2293 return Op; 2294 } 2295}; 2296 2297} // end anonymous namespace. 2298 2299void ARMOperand::print(raw_ostream &OS) const { 2300 switch (Kind) { 2301 case k_CondCode: 2302 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 2303 break; 2304 case k_CCOut: 2305 OS << "<ccout " << getReg() << ">"; 2306 break; 2307 case k_ITCondMask: { 2308 static const char *MaskStr[] = { 2309 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)", 2310 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)" 2311 }; 2312 assert((ITMask.Mask & 0xf) == ITMask.Mask); 2313 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 2314 break; 2315 } 2316 case k_CoprocNum: 2317 OS << "<coprocessor number: " << getCoproc() << ">"; 2318 break; 2319 case k_CoprocReg: 2320 OS << "<coprocessor register: " << getCoproc() << ">"; 2321 break; 2322 case k_CoprocOption: 2323 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 2324 break; 2325 case k_MSRMask: 2326 OS << "<mask: " << getMSRMask() << ">"; 2327 break; 2328 case k_Immediate: 2329 getImm()->print(OS); 2330 break; 2331 case k_MemBarrierOpt: 2332 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 2333 break; 2334 case k_Memory: 2335 OS << "<memory " 2336 << " base:" << Memory.BaseRegNum; 2337 OS << ">"; 2338 break; 2339 case k_PostIndexRegister: 2340 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-") 2341 << PostIdxReg.RegNum; 2342 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 2343 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 2344 << PostIdxReg.ShiftImm; 2345 OS << ">"; 2346 break; 2347 case k_ProcIFlags: { 2348 OS << "<ARM_PROC::"; 2349 unsigned IFlags = getProcIFlags(); 2350 for (int i=2; i >= 0; --i) 2351 if (IFlags & (1 << i)) 2352 OS << ARM_PROC::IFlagsToString(1 << i); 2353 OS << ">"; 2354 break; 2355 } 2356 case k_Register: 2357 OS << "<register " << getReg() << ">"; 2358 break; 2359 case k_ShifterImmediate: 2360 OS << "<shift " << (ShifterImm.isASR ? 
"asr" : "lsl") 2361 << " #" << ShifterImm.Imm << ">"; 2362 break; 2363 case k_ShiftedRegister: 2364 OS << "<so_reg_reg " 2365 << RegShiftedReg.SrcReg << " " 2366 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) 2367 << " " << RegShiftedReg.ShiftReg << ">"; 2368 break; 2369 case k_ShiftedImmediate: 2370 OS << "<so_reg_imm " 2371 << RegShiftedImm.SrcReg << " " 2372 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) 2373 << " #" << RegShiftedImm.ShiftImm << ">"; 2374 break; 2375 case k_RotateImmediate: 2376 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 2377 break; 2378 case k_BitfieldDescriptor: 2379 OS << "<bitfield " << "lsb: " << Bitfield.LSB 2380 << ", width: " << Bitfield.Width << ">"; 2381 break; 2382 case k_RegisterList: 2383 case k_DPRRegisterList: 2384 case k_SPRRegisterList: { 2385 OS << "<register_list "; 2386 2387 const SmallVectorImpl<unsigned> &RegList = getRegList(); 2388 for (SmallVectorImpl<unsigned>::const_iterator 2389 I = RegList.begin(), E = RegList.end(); I != E; ) { 2390 OS << *I; 2391 if (++I < E) OS << ", "; 2392 } 2393 2394 OS << ">"; 2395 break; 2396 } 2397 case k_VectorList: 2398 OS << "<vector_list " << VectorList.Count << " * " 2399 << VectorList.RegNum << ">"; 2400 break; 2401 case k_VectorListAllLanes: 2402 OS << "<vector_list(all lanes) " << VectorList.Count << " * " 2403 << VectorList.RegNum << ">"; 2404 break; 2405 case k_VectorListIndexed: 2406 OS << "<vector_list(lane " << VectorList.LaneIndex << ") " 2407 << VectorList.Count << " * " << VectorList.RegNum << ">"; 2408 break; 2409 case k_Token: 2410 OS << "'" << getToken() << "'"; 2411 break; 2412 case k_VectorIndex: 2413 OS << "<vectorindex " << getVectorIndex() << ">"; 2414 break; 2415 } 2416} 2417 2418/// @name Auto-generated Match Functions 2419/// { 2420 2421static unsigned MatchRegisterName(StringRef Name); 2422 2423/// } 2424 2425bool ARMAsmParser::ParseRegister(unsigned &RegNo, 2426 SMLoc &StartLoc, SMLoc &EndLoc) { 2427 StartLoc = Parser.getTok().getLoc(); 2428 RegNo = 
tryParseRegister(); 2429 EndLoc = Parser.getTok().getLoc(); 2430 2431 return (RegNo == (unsigned)-1); 2432} 2433 2434/// Try to parse a register name. The token must be an Identifier when called, 2435/// and if it is a register name the token is eaten and the register number is 2436/// returned. Otherwise return -1. 2437/// 2438int ARMAsmParser::tryParseRegister() { 2439 const AsmToken &Tok = Parser.getTok(); 2440 if (Tok.isNot(AsmToken::Identifier)) return -1; 2441 2442 std::string lowerCase = Tok.getString().lower(); 2443 unsigned RegNum = MatchRegisterName(lowerCase); 2444 if (!RegNum) { 2445 RegNum = StringSwitch<unsigned>(lowerCase) 2446 .Case("r13", ARM::SP) 2447 .Case("r14", ARM::LR) 2448 .Case("r15", ARM::PC) 2449 .Case("ip", ARM::R12) 2450 // Additional register name aliases for 'gas' compatibility. 2451 .Case("a1", ARM::R0) 2452 .Case("a2", ARM::R1) 2453 .Case("a3", ARM::R2) 2454 .Case("a4", ARM::R3) 2455 .Case("v1", ARM::R4) 2456 .Case("v2", ARM::R5) 2457 .Case("v3", ARM::R6) 2458 .Case("v4", ARM::R7) 2459 .Case("v5", ARM::R8) 2460 .Case("v6", ARM::R9) 2461 .Case("v7", ARM::R10) 2462 .Case("v8", ARM::R11) 2463 .Case("sb", ARM::R9) 2464 .Case("sl", ARM::R10) 2465 .Case("fp", ARM::R11) 2466 .Default(0); 2467 } 2468 if (!RegNum) { 2469 // Check for aliases registered via .req. Canonicalize to lower case. 2470 // That's more consistent since register names are case insensitive, and 2471 // it's how the original entry was passed in from MC/MCParser/AsmParser. 2472 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase); 2473 // If no match, return failure. 2474 if (Entry == RegisterReqs.end()) 2475 return -1; 2476 Parser.Lex(); // Eat identifier token. 2477 return Entry->getValue(); 2478 } 2479 2480 Parser.Lex(); // Eat identifier token. 2481 2482 return RegNum; 2483} 2484 2485// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2486// If a recoverable error occurs, return 1. If an irrecoverable error 2487// occurs, return -1. 
// An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Shift mnemonics are case insensitive; "asl" is an alias for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this yields 1 ("recoverable")
    // even though the previous operand was already popped — confirm intent.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted and immediate-shifted forms are distinct operand kinds.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
2585/// 2586/// TODO this is likely to change to allow different register types and or to 2587/// parse for a specific register type. 2588bool ARMAsmParser:: 2589tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2590 SMLoc S = Parser.getTok().getLoc(); 2591 int RegNo = tryParseRegister(); 2592 if (RegNo == -1) 2593 return true; 2594 2595 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2596 2597 const AsmToken &ExclaimTok = Parser.getTok(); 2598 if (ExclaimTok.is(AsmToken::Exclaim)) { 2599 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2600 ExclaimTok.getLoc())); 2601 Parser.Lex(); // Eat exclaim token 2602 return false; 2603 } 2604 2605 // Also check for an index operand. This is only legal for vector registers, 2606 // but that'll get caught OK in operand matching, so we don't need to 2607 // explicitly filter everything else out here. 2608 if (Parser.getTok().is(AsmToken::LBrac)) { 2609 SMLoc SIdx = Parser.getTok().getLoc(); 2610 Parser.Lex(); // Eat left bracket token. 2611 2612 const MCExpr *ImmVal; 2613 if (getParser().ParseExpression(ImmVal)) 2614 return true; 2615 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2616 if (!MCE) 2617 return TokError("immediate value expected for vector index"); 2618 2619 SMLoc E = Parser.getTok().getLoc(); 2620 if (Parser.getTok().isNot(AsmToken::RBrac)) 2621 return Error(E, "']' expected"); 2622 2623 Parser.Lex(); // Eat right bracket token. 2624 2625 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2626 SIdx, E, 2627 getContext())); 2628 } 2629 2630 return false; 2631} 2632 2633/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2634/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2635/// "c5", ... 2636static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2637 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2638 // but efficient. 
2639 switch (Name.size()) { 2640 default: return -1; 2641 case 2: 2642 if (Name[0] != CoprocOp) 2643 return -1; 2644 switch (Name[1]) { 2645 default: return -1; 2646 case '0': return 0; 2647 case '1': return 1; 2648 case '2': return 2; 2649 case '3': return 3; 2650 case '4': return 4; 2651 case '5': return 5; 2652 case '6': return 6; 2653 case '7': return 7; 2654 case '8': return 8; 2655 case '9': return 9; 2656 } 2657 case 3: 2658 if (Name[0] != CoprocOp || Name[1] != '1') 2659 return -1; 2660 switch (Name[2]) { 2661 default: return -1; 2662 case '0': return 10; 2663 case '1': return 11; 2664 case '2': return 12; 2665 case '3': return 13; 2666 case '4': return 14; 2667 case '5': return 15; 2668 } 2669 } 2670} 2671 2672/// parseITCondCode - Try to parse a condition code for an IT instruction. 2673ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2674parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2675 SMLoc S = Parser.getTok().getLoc(); 2676 const AsmToken &Tok = Parser.getTok(); 2677 if (!Tok.is(AsmToken::Identifier)) 2678 return MatchOperand_NoMatch; 2679 unsigned CC = StringSwitch<unsigned>(Tok.getString().lower()) 2680 .Case("eq", ARMCC::EQ) 2681 .Case("ne", ARMCC::NE) 2682 .Case("hs", ARMCC::HS) 2683 .Case("cs", ARMCC::HS) 2684 .Case("lo", ARMCC::LO) 2685 .Case("cc", ARMCC::LO) 2686 .Case("mi", ARMCC::MI) 2687 .Case("pl", ARMCC::PL) 2688 .Case("vs", ARMCC::VS) 2689 .Case("vc", ARMCC::VC) 2690 .Case("hi", ARMCC::HI) 2691 .Case("ls", ARMCC::LS) 2692 .Case("ge", ARMCC::GE) 2693 .Case("lt", ARMCC::LT) 2694 .Case("gt", ARMCC::GT) 2695 .Case("le", ARMCC::LE) 2696 .Case("al", ARMCC::AL) 2697 .Default(~0U); 2698 if (CC == ~0U) 2699 return MatchOperand_NoMatch; 2700 Parser.Lex(); // Eat the token. 2701 2702 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2703 2704 return MatchOperand_Success; 2705} 2706 2707/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2708/// token must be an Identifier when called, and if it is a coprocessor 2709/// number, the token is eaten and the operand is added to the operand list. 2710ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2711parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2712 SMLoc S = Parser.getTok().getLoc(); 2713 const AsmToken &Tok = Parser.getTok(); 2714 if (Tok.isNot(AsmToken::Identifier)) 2715 return MatchOperand_NoMatch; 2716 2717 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2718 if (Num == -1) 2719 return MatchOperand_NoMatch; 2720 2721 Parser.Lex(); // Eat identifier token. 2722 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2723 return MatchOperand_Success; 2724} 2725 2726/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2727/// token must be an Identifier when called, and if it is a coprocessor 2728/// number, the token is eaten and the operand is added to the operand list. 2729ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2730parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2731 SMLoc S = Parser.getTok().getLoc(); 2732 const AsmToken &Tok = Parser.getTok(); 2733 if (Tok.isNot(AsmToken::Identifier)) 2734 return MatchOperand_NoMatch; 2735 2736 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2737 if (Reg == -1) 2738 return MatchOperand_NoMatch; 2739 2740 Parser.Lex(); // Eat identifier token. 2741 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2742 return MatchOperand_Success; 2743} 2744 2745/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2746/// coproc_option : '{' imm0_255 '}' 2747ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2748parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2749 SMLoc S = Parser.getTok().getLoc(); 2750 2751 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2752 if (Parser.getTok().isNot(AsmToken::LCurly)) 2753 return MatchOperand_NoMatch; 2754 Parser.Lex(); // Eat the '{' 2755 2756 const MCExpr *Expr; 2757 SMLoc Loc = Parser.getTok().getLoc(); 2758 if (getParser().ParseExpression(Expr)) { 2759 Error(Loc, "illegal expression"); 2760 return MatchOperand_ParseFail; 2761 } 2762 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2763 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2764 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2765 return MatchOperand_ParseFail; 2766 } 2767 int Val = CE->getValue(); 2768 2769 // Check for and consume the closing '}' 2770 if (Parser.getTok().isNot(AsmToken::RCurly)) 2771 return MatchOperand_ParseFail; 2772 SMLoc E = Parser.getTok().getLoc(); 2773 Parser.Lex(); // Eat the '}' 2774 2775 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2776 return MatchOperand_Success; 2777} 2778 2779// For register list parsing, we need to map from raw GPR register numbering 2780// to the enumeration values. The enumeration values aren't sorted by 2781// register number due to our using "sp", "lr" and "pc" as canonical names. 2782static unsigned getNextRegister(unsigned Reg) { 2783 // If this is a GPR, we need to do it manually, otherwise we can rely 2784 // on the sort ordering of the enumeration since the other reg-classes 2785 // are sane. 
2786 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2787 return Reg + 1; 2788 switch(Reg) { 2789 default: llvm_unreachable("Invalid GPR number!"); 2790 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2791 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2792 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2793 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2794 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2795 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2796 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2797 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2798 } 2799} 2800 2801// Return the low-subreg of a given Q register. 2802static unsigned getDRegFromQReg(unsigned QReg) { 2803 switch (QReg) { 2804 default: llvm_unreachable("expected a Q register!"); 2805 case ARM::Q0: return ARM::D0; 2806 case ARM::Q1: return ARM::D2; 2807 case ARM::Q2: return ARM::D4; 2808 case ARM::Q3: return ARM::D6; 2809 case ARM::Q4: return ARM::D8; 2810 case ARM::Q5: return ARM::D10; 2811 case ARM::Q6: return ARM::D12; 2812 case ARM::Q7: return ARM::D14; 2813 case ARM::Q8: return ARM::D16; 2814 case ARM::Q9: return ARM::D18; 2815 case ARM::Q10: return ARM::D20; 2816 case ARM::Q11: return ARM::D22; 2817 case ARM::Q12: return ARM::D24; 2818 case ARM::Q13: return ARM::D26; 2819 case ARM::Q14: return ARM::D28; 2820 case ARM::Q15: return ARM::D30; 2821 } 2822} 2823 2824/// Parse a register list. 2825bool ARMAsmParser:: 2826parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2827 assert(Parser.getTok().is(AsmToken::LCurly) && 2828 "Token is not a Left Curly Brace"); 2829 SMLoc S = Parser.getTok().getLoc(); 2830 Parser.Lex(); // Eat '{' token. 2831 SMLoc RegLoc = Parser.getTok().getLoc(); 2832 2833 // Check the first register in the list to see what register class 2834 // this is a list of. 
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // The first register determines the class of the whole list; all
  // subsequent registers must come from the same class.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      // getNextRegister() handles the non-monotonic GPR enum ordering
      // (sp/lr/pc are canonical names, so enum order != register number).
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing. For GPR lists an out-of-order
    // register is only a (deprecated-encoding) warning; for VFP lists it
    // is a hard error.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else
        return Error(RegLoc, "register list not in ascending order");
    }
    // A duplicated register is accepted with a warning; the duplicate is
    // simply dropped from the list.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}

// Helper function to parse the lane index for vector lists ("d0[2]" etc.).
// Sets LaneKind to NoLanes (no '[' present), AllLanes ("[]"), or
// IndexedLane ("[n]", with Index set to n).
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }

    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assembly puts one in, and it's friendly to accept that.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat the '#'

    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().ParseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[' at all: the register has no lane specifier.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}

// parse a vector register list, e.g. "{d0, d1}", "{d0[], d1[]}", "{d0[2]}".
// Also accepts a bare D or Q register as a one/two-entry list (gas
// compatibility). Emits a VectorList / VectorListAllLanes /
// VectorListIndexed operand depending on the lane syntax used.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A bare Q register is treated as its two D sub-registers.
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  // Spacing: 0 = not yet known, 1 = single-spaced, 2 = double-spaced.
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }
  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Register range, e.g. "d0-d3".
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present. Every entry in the list must
      // carry the same lane syntax (and index) as the first entry.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      EndLoc = Parser.getTok().getLoc();

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc EndLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }

    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3252ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3253parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3254 SMLoc S = Parser.getTok().getLoc(); 3255 const AsmToken &Tok = Parser.getTok(); 3256 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 3257 StringRef OptStr = Tok.getString(); 3258 3259 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 3260 .Case("sy", ARM_MB::SY) 3261 .Case("st", ARM_MB::ST) 3262 .Case("sh", ARM_MB::ISH) 3263 .Case("ish", ARM_MB::ISH) 3264 .Case("shst", ARM_MB::ISHST) 3265 .Case("ishst", ARM_MB::ISHST) 3266 .Case("nsh", ARM_MB::NSH) 3267 .Case("un", ARM_MB::NSH) 3268 .Case("nshst", ARM_MB::NSHST) 3269 .Case("unst", ARM_MB::NSHST) 3270 .Case("osh", ARM_MB::OSH) 3271 .Case("oshst", ARM_MB::OSHST) 3272 .Default(~0U); 3273 3274 if (Opt == ~0U) 3275 return MatchOperand_NoMatch; 3276 3277 Parser.Lex(); // Eat identifier token. 3278 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 3279 return MatchOperand_Success; 3280} 3281 3282/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 3283ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3284parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3285 SMLoc S = Parser.getTok().getLoc(); 3286 const AsmToken &Tok = Parser.getTok(); 3287 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 3288 StringRef IFlagsStr = Tok.getString(); 3289 3290 // An iflags string of "none" is interpreted to mean that none of the AIF 3291 // bits are set. Not a terribly useful instruction, but a valid encoding. 
3292 unsigned IFlags = 0; 3293 if (IFlagsStr != "none") { 3294 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 3295 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 3296 .Case("a", ARM_PROC::A) 3297 .Case("i", ARM_PROC::I) 3298 .Case("f", ARM_PROC::F) 3299 .Default(~0U); 3300 3301 // If some specific iflag is already set, it means that some letter is 3302 // present more than once, this is not acceptable. 3303 if (Flag == ~0U || (IFlags & Flag)) 3304 return MatchOperand_NoMatch; 3305 3306 IFlags |= Flag; 3307 } 3308 } 3309 3310 Parser.Lex(); // Eat identifier token. 3311 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 3312 return MatchOperand_Success; 3313} 3314 3315/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 3316ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3317parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3318 SMLoc S = Parser.getTok().getLoc(); 3319 const AsmToken &Tok = Parser.getTok(); 3320 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 3321 StringRef Mask = Tok.getString(); 3322 3323 if (isMClass()) { 3324 // See ARMv6-M 10.1.1 3325 std::string Name = Mask.lower(); 3326 unsigned FlagsVal = StringSwitch<unsigned>(Name) 3327 .Case("apsr", 0) 3328 .Case("iapsr", 1) 3329 .Case("eapsr", 2) 3330 .Case("xpsr", 3) 3331 .Case("ipsr", 5) 3332 .Case("epsr", 6) 3333 .Case("iepsr", 7) 3334 .Case("msp", 8) 3335 .Case("psp", 9) 3336 .Case("primask", 16) 3337 .Case("basepri", 17) 3338 .Case("basepri_max", 18) 3339 .Case("faultmask", 19) 3340 .Case("control", 20) 3341 .Default(~0U); 3342 3343 if (FlagsVal == ~0U) 3344 return MatchOperand_NoMatch; 3345 3346 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 3347 // basepri, basepri_max and faultmask only valid for V7m. 3348 return MatchOperand_NoMatch; 3349 3350 Parser.Lex(); // Eat identifier token. 
3351 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3352 return MatchOperand_Success; 3353 } 3354 3355 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 3356 size_t Start = 0, Next = Mask.find('_'); 3357 StringRef Flags = ""; 3358 std::string SpecReg = Mask.slice(Start, Next).lower(); 3359 if (Next != StringRef::npos) 3360 Flags = Mask.slice(Next+1, Mask.size()); 3361 3362 // FlagsVal contains the complete mask: 3363 // 3-0: Mask 3364 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3365 unsigned FlagsVal = 0; 3366 3367 if (SpecReg == "apsr") { 3368 FlagsVal = StringSwitch<unsigned>(Flags) 3369 .Case("nzcvq", 0x8) // same as CPSR_f 3370 .Case("g", 0x4) // same as CPSR_s 3371 .Case("nzcvqg", 0xc) // same as CPSR_fs 3372 .Default(~0U); 3373 3374 if (FlagsVal == ~0U) { 3375 if (!Flags.empty()) 3376 return MatchOperand_NoMatch; 3377 else 3378 FlagsVal = 8; // No flag 3379 } 3380 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 3381 // cpsr_all is an alias for cpsr_fc, as is plain cpsr. 3382 if (Flags == "all" || Flags == "") 3383 Flags = "fc"; 3384 for (int i = 0, e = Flags.size(); i != e; ++i) { 3385 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 3386 .Case("c", 1) 3387 .Case("x", 2) 3388 .Case("s", 4) 3389 .Case("f", 8) 3390 .Default(~0U); 3391 3392 // If some specific flag is already set, it means that some letter is 3393 // present more than once, this is not acceptable. 3394 if (FlagsVal == ~0U || (FlagsVal & Flag)) 3395 return MatchOperand_NoMatch; 3396 FlagsVal |= Flag; 3397 } 3398 } else // No match for special register. 3399 return MatchOperand_NoMatch; 3400 3401 // Special register without flags is NOT equivalent to "fc" flags. 3402 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 3403 // two lines would enable gas compatibility at the expense of breaking 3404 // round-tripping. 
3405 // 3406 // if (!FlagsVal) 3407 // FlagsVal = 0x9; 3408 3409 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3410 if (SpecReg == "spsr") 3411 FlagsVal |= 16; 3412 3413 Parser.Lex(); // Eat identifier token. 3414 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3415 return MatchOperand_Success; 3416} 3417 3418ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3419parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 3420 int Low, int High) { 3421 const AsmToken &Tok = Parser.getTok(); 3422 if (Tok.isNot(AsmToken::Identifier)) { 3423 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3424 return MatchOperand_ParseFail; 3425 } 3426 StringRef ShiftName = Tok.getString(); 3427 std::string LowerOp = Op.lower(); 3428 std::string UpperOp = Op.upper(); 3429 if (ShiftName != LowerOp && ShiftName != UpperOp) { 3430 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3431 return MatchOperand_ParseFail; 3432 } 3433 Parser.Lex(); // Eat shift type token. 3434 3435 // There must be a '#' and a shift amount. 3436 if (Parser.getTok().isNot(AsmToken::Hash) && 3437 Parser.getTok().isNot(AsmToken::Dollar)) { 3438 Error(Parser.getTok().getLoc(), "'#' expected"); 3439 return MatchOperand_ParseFail; 3440 } 3441 Parser.Lex(); // Eat hash token. 
3442 3443 const MCExpr *ShiftAmount; 3444 SMLoc Loc = Parser.getTok().getLoc(); 3445 if (getParser().ParseExpression(ShiftAmount)) { 3446 Error(Loc, "illegal expression"); 3447 return MatchOperand_ParseFail; 3448 } 3449 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3450 if (!CE) { 3451 Error(Loc, "constant expression expected"); 3452 return MatchOperand_ParseFail; 3453 } 3454 int Val = CE->getValue(); 3455 if (Val < Low || Val > High) { 3456 Error(Loc, "immediate value out of range"); 3457 return MatchOperand_ParseFail; 3458 } 3459 3460 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 3461 3462 return MatchOperand_Success; 3463} 3464 3465ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3466parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3467 const AsmToken &Tok = Parser.getTok(); 3468 SMLoc S = Tok.getLoc(); 3469 if (Tok.isNot(AsmToken::Identifier)) { 3470 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3471 return MatchOperand_ParseFail; 3472 } 3473 int Val = StringSwitch<int>(Tok.getString()) 3474 .Case("be", 1) 3475 .Case("le", 0) 3476 .Default(-1); 3477 Parser.Lex(); // Eat the token. 3478 3479 if (Val == -1) { 3480 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3481 return MatchOperand_ParseFail; 3482 } 3483 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3484 getContext()), 3485 S, Parser.getTok().getLoc())); 3486 return MatchOperand_Success; 3487} 3488 3489/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3490/// instructions. Legal values are: 3491/// lsl #n 'n' in [0,31] 3492/// asr #n 'n' in [1,32] 3493/// n == 32 encoded as n == 0. 
3494ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3495parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3496 const AsmToken &Tok = Parser.getTok(); 3497 SMLoc S = Tok.getLoc(); 3498 if (Tok.isNot(AsmToken::Identifier)) { 3499 Error(S, "shift operator 'asr' or 'lsl' expected"); 3500 return MatchOperand_ParseFail; 3501 } 3502 StringRef ShiftName = Tok.getString(); 3503 bool isASR; 3504 if (ShiftName == "lsl" || ShiftName == "LSL") 3505 isASR = false; 3506 else if (ShiftName == "asr" || ShiftName == "ASR") 3507 isASR = true; 3508 else { 3509 Error(S, "shift operator 'asr' or 'lsl' expected"); 3510 return MatchOperand_ParseFail; 3511 } 3512 Parser.Lex(); // Eat the operator. 3513 3514 // A '#' and a shift amount. 3515 if (Parser.getTok().isNot(AsmToken::Hash) && 3516 Parser.getTok().isNot(AsmToken::Dollar)) { 3517 Error(Parser.getTok().getLoc(), "'#' expected"); 3518 return MatchOperand_ParseFail; 3519 } 3520 Parser.Lex(); // Eat hash token. 3521 3522 const MCExpr *ShiftAmount; 3523 SMLoc E = Parser.getTok().getLoc(); 3524 if (getParser().ParseExpression(ShiftAmount)) { 3525 Error(E, "malformed shift expression"); 3526 return MatchOperand_ParseFail; 3527 } 3528 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3529 if (!CE) { 3530 Error(E, "shift amount must be an immediate"); 3531 return MatchOperand_ParseFail; 3532 } 3533 3534 int64_t Val = CE->getValue(); 3535 if (isASR) { 3536 // Shift amount must be in [1,32] 3537 if (Val < 1 || Val > 32) { 3538 Error(E, "'asr' shift amount must be in range [1,32]"); 3539 return MatchOperand_ParseFail; 3540 } 3541 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3542 if (isThumb() && Val == 32) { 3543 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 3544 return MatchOperand_ParseFail; 3545 } 3546 if (Val == 32) Val = 0; 3547 } else { 3548 // Shift amount must be in [1,32] 3549 if (Val < 0 || Val > 31) { 3550 Error(E, "'lsr' shift amount must be in range [0,31]"); 3551 return MatchOperand_ParseFail; 3552 } 3553 } 3554 3555 E = Parser.getTok().getLoc(); 3556 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 3557 3558 return MatchOperand_Success; 3559} 3560 3561/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3562/// of instructions. Legal values are: 3563/// ror #n 'n' in {0, 8, 16, 24} 3564ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3565parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3566 const AsmToken &Tok = Parser.getTok(); 3567 SMLoc S = Tok.getLoc(); 3568 if (Tok.isNot(AsmToken::Identifier)) 3569 return MatchOperand_NoMatch; 3570 StringRef ShiftName = Tok.getString(); 3571 if (ShiftName != "ror" && ShiftName != "ROR") 3572 return MatchOperand_NoMatch; 3573 Parser.Lex(); // Eat the operator. 3574 3575 // A '#' and a rotate amount. 3576 if (Parser.getTok().isNot(AsmToken::Hash) && 3577 Parser.getTok().isNot(AsmToken::Dollar)) { 3578 Error(Parser.getTok().getLoc(), "'#' expected"); 3579 return MatchOperand_ParseFail; 3580 } 3581 Parser.Lex(); // Eat hash token. 
  const MCExpr *ShiftAmount;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(ShiftAmount)) {
    Error(E, "malformed rotate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE) {
    Error(E, "rotate amount must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Val = CE->getValue();
  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension;
  // normally zero is represented in asm by omitting the rotate operand
  // entirely).
  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
    Error(E, "'ror' rotate amount must be 8, 16, or 24");
    return MatchOperand_ParseFail;
  }

  E = Parser.getTok().getLoc();
  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));

  return MatchOperand_Success;
}

/// parseBitfield - Parse a BFC/BFI-style bitfield descriptor: '#' lsb ','
/// '#' width, with lsb in [0,31] and width in [1,32-lsb]. Emits a single
/// bitfield operand covering both values.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.
3653 3654 const MCExpr *WidthExpr; 3655 if (getParser().ParseExpression(WidthExpr)) { 3656 Error(E, "malformed immediate expression"); 3657 return MatchOperand_ParseFail; 3658 } 3659 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3660 if (!CE) { 3661 Error(E, "'width' operand must be an immediate"); 3662 return MatchOperand_ParseFail; 3663 } 3664 3665 int64_t Width = CE->getValue(); 3666 // The LSB must be in the range [1,32-lsb] 3667 if (Width < 1 || Width > 32 - LSB) { 3668 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3669 return MatchOperand_ParseFail; 3670 } 3671 E = Parser.getTok().getLoc(); 3672 3673 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3674 3675 return MatchOperand_Success; 3676} 3677 3678ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3679parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3680 // Check for a post-index addressing register operand. Specifically: 3681 // postidx_reg := '+' register {, shift} 3682 // | '-' register {, shift} 3683 // | register {, shift} 3684 3685 // This method must return MatchOperand_NoMatch without consuming any tokens 3686 // in the case where there is no match, as other alternatives take other 3687 // parse methods. 3688 AsmToken Tok = Parser.getTok(); 3689 SMLoc S = Tok.getLoc(); 3690 bool haveEaten = false; 3691 bool isAdd = true; 3692 int Reg = -1; 3693 if (Tok.is(AsmToken::Plus)) { 3694 Parser.Lex(); // Eat the '+' token. 3695 haveEaten = true; 3696 } else if (Tok.is(AsmToken::Minus)) { 3697 Parser.Lex(); // Eat the '-' token. 
3698 isAdd = false; 3699 haveEaten = true; 3700 } 3701 if (Parser.getTok().is(AsmToken::Identifier)) 3702 Reg = tryParseRegister(); 3703 if (Reg == -1) { 3704 if (!haveEaten) 3705 return MatchOperand_NoMatch; 3706 Error(Parser.getTok().getLoc(), "register expected"); 3707 return MatchOperand_ParseFail; 3708 } 3709 SMLoc E = Parser.getTok().getLoc(); 3710 3711 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3712 unsigned ShiftImm = 0; 3713 if (Parser.getTok().is(AsmToken::Comma)) { 3714 Parser.Lex(); // Eat the ','. 3715 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3716 return MatchOperand_ParseFail; 3717 } 3718 3719 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3720 ShiftImm, S, E)); 3721 3722 return MatchOperand_Success; 3723} 3724 3725ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3726parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3727 // Check for a post-index addressing register operand. Specifically: 3728 // am3offset := '+' register 3729 // | '-' register 3730 // | register 3731 // | # imm 3732 // | # + imm 3733 // | # - imm 3734 3735 // This method must return MatchOperand_NoMatch without consuming any tokens 3736 // in the case where there is no match, as other alternatives take other 3737 // parse methods. 3738 AsmToken Tok = Parser.getTok(); 3739 SMLoc S = Tok.getLoc(); 3740 3741 // Do immediates first, as we always parse those if we have a '#'. 3742 if (Parser.getTok().is(AsmToken::Hash) || 3743 Parser.getTok().is(AsmToken::Dollar)) { 3744 Parser.Lex(); // Eat the '#'. 3745 // Explicitly look for a '-', as we need to encode negative zero 3746 // differently. 
3747 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3748 const MCExpr *Offset; 3749 if (getParser().ParseExpression(Offset)) 3750 return MatchOperand_ParseFail; 3751 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3752 if (!CE) { 3753 Error(S, "constant expression expected"); 3754 return MatchOperand_ParseFail; 3755 } 3756 SMLoc E = Tok.getLoc(); 3757 // Negative zero is encoded as the flag value INT32_MIN. 3758 int32_t Val = CE->getValue(); 3759 if (isNegative && Val == 0) 3760 Val = INT32_MIN; 3761 3762 Operands.push_back( 3763 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3764 3765 return MatchOperand_Success; 3766 } 3767 3768 3769 bool haveEaten = false; 3770 bool isAdd = true; 3771 int Reg = -1; 3772 if (Tok.is(AsmToken::Plus)) { 3773 Parser.Lex(); // Eat the '+' token. 3774 haveEaten = true; 3775 } else if (Tok.is(AsmToken::Minus)) { 3776 Parser.Lex(); // Eat the '-' token. 3777 isAdd = false; 3778 haveEaten = true; 3779 } 3780 if (Parser.getTok().is(AsmToken::Identifier)) 3781 Reg = tryParseRegister(); 3782 if (Reg == -1) { 3783 if (!haveEaten) 3784 return MatchOperand_NoMatch; 3785 Error(Parser.getTok().getLoc(), "register expected"); 3786 return MatchOperand_ParseFail; 3787 } 3788 SMLoc E = Parser.getTok().getLoc(); 3789 3790 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3791 0, S, E)); 3792 3793 return MatchOperand_Success; 3794} 3795 3796/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3797/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3798/// when they refer multiple MIOperands inside a single one. 3799bool ARMAsmParser:: 3800cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3801 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3802 // Rt, Rt2 3803 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3804 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3805 // Create a writeback register dummy placeholder. 
  Inst.addOperand(MCOperand::CreateReg(0));
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    // Unlike the other converters, this one signals failure by returning
    // false (the success path below returns true).
    return false;
  }
  // Rd
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Optional CPSR-setting operand.
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Repeat operand 0 (Rd) as the second source operand.
  Inst.addOperand(Inst.getOperand(0));
  // pred
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression. Returns false on success; on failure a
/// diagnostic has been emitted and true is returned. The first token must be
/// a '[' when called. Handles the forms: [Rn], [Rn, :align], [Rn, #imm],
/// and [Rn, +/-Rm {, shift}], each with an optional trailing '!'.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  if (Tok.is(AsmToken::RBrac)) {
    // Bare "[Rn]" -- no offset, no alignment.
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Convert the alignment in bits (as written in asm) to bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// Returns false on success (St/Amount filled in); returns true on error
/// (after emitting a diagnostic, except for the non-identifier case below).
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  // NOTE(review): this early exit returns failure without emitting a
  // diagnostic -- callers surface it as-is.
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular ParseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real)) {
    // NOTE(review): the literal is always parsed with single-precision
    // semantics and the sign bit toggled at bit 31, even when the type
    // suffix was '.f64' -- confirm this is the intended encoding.
    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode the raw 8-bit encoding into the FP value it denotes, then
    // store the double's bit pattern as the immediate.
    double RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::Create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Dispatch on the leading token of the operand.
  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    // If this is VMRS, check for the apsr_nzcv operand.
    if (Mnemonic == "vmrs" &&
        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
      return false;
    }

    // Fall through for the Identifier case that is not a register or a
    // special name.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = Parser.getTok().getLoc();
    Parser.Lex();

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      bool isNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().ParseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        // '#-0' is encoded as the flag value INT32_MIN.
        int32_t Val = CE->getValue();
        if (isNegative && Val == 0)
          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    // FALLTHROUGH
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}

// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
// :lower16: and :upper16:.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex(); // Eat the identifier.

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
4625 if ((Mnemonic == "movs" && isThumb()) || 4626 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || 4627 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || 4628 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || 4629 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || 4630 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || 4631 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || 4632 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || 4633 Mnemonic == "fmuls") 4634 return Mnemonic; 4635 4636 // First, split out any predication code. Ignore mnemonics we know aren't 4637 // predicated but do have a carry-set and so weren't caught above. 4638 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && 4639 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && 4640 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && 4641 Mnemonic != "sbcs" && Mnemonic != "rscs") { 4642 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) 4643 .Case("eq", ARMCC::EQ) 4644 .Case("ne", ARMCC::NE) 4645 .Case("hs", ARMCC::HS) 4646 .Case("cs", ARMCC::HS) 4647 .Case("lo", ARMCC::LO) 4648 .Case("cc", ARMCC::LO) 4649 .Case("mi", ARMCC::MI) 4650 .Case("pl", ARMCC::PL) 4651 .Case("vs", ARMCC::VS) 4652 .Case("vc", ARMCC::VC) 4653 .Case("hi", ARMCC::HI) 4654 .Case("ls", ARMCC::LS) 4655 .Case("ge", ARMCC::GE) 4656 .Case("lt", ARMCC::LT) 4657 .Case("gt", ARMCC::GT) 4658 .Case("le", ARMCC::LE) 4659 .Case("al", ARMCC::AL) 4660 .Default(~0U); 4661 if (CC != ~0U) { 4662 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); 4663 PredicationCode = CC; 4664 } 4665 } 4666 4667 // Next, determine if we have a carry setting bit. We explicitly ignore all 4668 // the instructions we know end in 's'. 
4669 if (Mnemonic.endswith("s") && 4670 !(Mnemonic == "cps" || Mnemonic == "mls" || 4671 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || 4672 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || 4673 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || 4674 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" || 4675 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" || 4676 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" || 4677 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || 4678 Mnemonic == "vfms" || Mnemonic == "vfnms" || 4679 (Mnemonic == "movs" && isThumb()))) { 4680 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); 4681 CarrySetting = true; 4682 } 4683 4684 // The "cps" instruction can have a interrupt mode operand which is glued into 4685 // the mnemonic. Check if this is the case, split it and parse the imod op 4686 if (Mnemonic.startswith("cps")) { 4687 // Split out any imod code. 4688 unsigned IMod = 4689 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 4690 .Case("ie", ARM_PROC::IE) 4691 .Case("id", ARM_PROC::ID) 4692 .Default(~0U); 4693 if (IMod != ~0U) { 4694 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 4695 ProcessorIMod = IMod; 4696 } 4697 } 4698 4699 // The "it" instruction has the condition mask on the end of the mnemonic. 4700 if (Mnemonic.startswith("it")) { 4701 ITMask = Mnemonic.slice(2, Mnemonic.size()); 4702 Mnemonic = Mnemonic.slice(0, 2); 4703 } 4704 4705 return Mnemonic; 4706} 4707 4708/// \brief Given a canonical mnemonic, determine if the instruction ever allows 4709/// inclusion of carry set or predication code operands. 4710// 4711// FIXME: It would be nice to autogen this. 
4712void ARMAsmParser:: 4713getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4714 bool &CanAcceptPredicationCode) { 4715 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4716 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4717 Mnemonic == "add" || Mnemonic == "adc" || 4718 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4719 Mnemonic == "orr" || Mnemonic == "mvn" || 4720 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4721 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4722 Mnemonic == "vfm" || Mnemonic == "vfnm" || 4723 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4724 Mnemonic == "mla" || Mnemonic == "smlal" || 4725 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4726 CanAcceptCarrySet = true; 4727 } else 4728 CanAcceptCarrySet = false; 4729 4730 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4731 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4732 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4733 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4734 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4735 (Mnemonic == "clrex" && !isThumb()) || 4736 (Mnemonic == "nop" && isThumbOne()) || 4737 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4738 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4739 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4740 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4741 !isThumb()) || 4742 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4743 CanAcceptPredicationCode = false; 4744 } else 4745 CanAcceptPredicationCode = true; 4746 4747 if (isThumb()) { 4748 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4749 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4750 CanAcceptPredicationCode = false; 4751 } 4752} 4753 4754bool 
ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4755 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4756 // FIXME: This is all horribly hacky. We really need a better way to deal 4757 // with optional operands like this in the matcher table. 4758 4759 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4760 // another does not. Specifically, the MOVW instruction does not. So we 4761 // special case it here and remove the defaulted (non-setting) cc_out 4762 // operand if that's the instruction we're trying to match. 4763 // 4764 // We do this as post-processing of the explicit operands rather than just 4765 // conditionally adding the cc_out in the first place because we need 4766 // to check the type of the parsed immediate operand. 4767 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4768 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4769 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4770 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4771 return true; 4772 4773 // Register-register 'add' for thumb does not have a cc_out operand 4774 // when there are only two register operands. 4775 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4776 static_cast<ARMOperand*>(Operands[3])->isReg() && 4777 static_cast<ARMOperand*>(Operands[4])->isReg() && 4778 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4779 return true; 4780 // Register-register 'add' for thumb does not have a cc_out operand 4781 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4782 // have to check the immediate range here since Thumb2 has a variant 4783 // that can handle a different range and has a cc_out operand. 
4784 if (((isThumb() && Mnemonic == "add") || 4785 (isThumbTwo() && Mnemonic == "sub")) && 4786 Operands.size() == 6 && 4787 static_cast<ARMOperand*>(Operands[3])->isReg() && 4788 static_cast<ARMOperand*>(Operands[4])->isReg() && 4789 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4790 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4791 ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) || 4792 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4793 return true; 4794 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4795 // imm0_4095 variant. That's the least-preferred variant when 4796 // selecting via the generic "add" mnemonic, so to know that we 4797 // should remove the cc_out operand, we have to explicitly check that 4798 // it's not one of the other variants. Ugh. 4799 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4800 Operands.size() == 6 && 4801 static_cast<ARMOperand*>(Operands[3])->isReg() && 4802 static_cast<ARMOperand*>(Operands[4])->isReg() && 4803 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4804 // Nest conditions rather than one big 'if' statement for readability. 4805 // 4806 // If either register is a high reg, it's either one of the SP 4807 // variants (handled above) or a 32-bit encoding, so we just 4808 // check against T3. If the second register is the PC, this is an 4809 // alternate form of ADR, which uses encoding T4, so check for that too. 4810 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4811 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4812 static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC && 4813 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4814 return false; 4815 // If both registers are low, we're in an IT block, and the immediate is 4816 // in range, we should use encoding T1 instead, which has a cc_out. 
4817 if (inITBlock() && 4818 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4819 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4820 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4821 return false; 4822 4823 // Otherwise, we use encoding T4, which does not have a cc_out 4824 // operand. 4825 return true; 4826 } 4827 4828 // The thumb2 multiply instruction doesn't have a CCOut register, so 4829 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4830 // use the 16-bit encoding or not. 4831 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4832 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4833 static_cast<ARMOperand*>(Operands[3])->isReg() && 4834 static_cast<ARMOperand*>(Operands[4])->isReg() && 4835 static_cast<ARMOperand*>(Operands[5])->isReg() && 4836 // If the registers aren't low regs, the destination reg isn't the 4837 // same as one of the source regs, or the cc_out operand is zero 4838 // outside of an IT block, we have to use the 32-bit encoding, so 4839 // remove the cc_out operand. 4840 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4841 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4842 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) || 4843 !inITBlock() || 4844 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4845 static_cast<ARMOperand*>(Operands[5])->getReg() && 4846 static_cast<ARMOperand*>(Operands[3])->getReg() != 4847 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4848 return true; 4849 4850 // Also check the 'mul' syntax variant that doesn't specify an explicit 4851 // destination register. 
4852 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && 4853 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4854 static_cast<ARMOperand*>(Operands[3])->isReg() && 4855 static_cast<ARMOperand*>(Operands[4])->isReg() && 4856 // If the registers aren't low regs or the cc_out operand is zero 4857 // outside of an IT block, we have to use the 32-bit encoding, so 4858 // remove the cc_out operand. 4859 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4860 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4861 !inITBlock())) 4862 return true; 4863 4864 4865 4866 // Register-register 'add/sub' for thumb does not have a cc_out operand 4867 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4868 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4869 // right, this will result in better diagnostics (which operand is off) 4870 // anyway. 4871 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4872 (Operands.size() == 5 || Operands.size() == 6) && 4873 static_cast<ARMOperand*>(Operands[3])->isReg() && 4874 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4875 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4876 (static_cast<ARMOperand*>(Operands[4])->isImm() || 4877 (Operands.size() == 6 && 4878 static_cast<ARMOperand*>(Operands[5])->isImm()))) 4879 return true; 4880 4881 return false; 4882} 4883 4884static bool isDataTypeToken(StringRef Tok) { 4885 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || 4886 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || 4887 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || 4888 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || 4889 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || 4890 Tok == ".f" || Tok == ".d"; 4891} 4892 4893// FIXME: This bit should probably be handled via an explicit match class 4894// in the .td 
files that matches the suffix instead of having it be 4895// a literal string token the way it is now. 4896static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) { 4897 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm"); 4898} 4899 4900static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features); 4901/// Parse an arm instruction mnemonic followed by its operands. 4902bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, 4903 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4904 // Apply mnemonic aliases before doing anything else, as the destination 4905 // mnemnonic may include suffices and we want to handle them normally. 4906 // The generic tblgen'erated code does this later, at the start of 4907 // MatchInstructionImpl(), but that's too late for aliases that include 4908 // any sort of suffix. 4909 unsigned AvailableFeatures = getAvailableFeatures(); 4910 applyMnemonicAliases(Name, AvailableFeatures); 4911 4912 // First check for the ARM-specific .req directive. 4913 if (Parser.getTok().is(AsmToken::Identifier) && 4914 Parser.getTok().getIdentifier() == ".req") { 4915 parseDirectiveReq(Name, NameLoc); 4916 // We always return 'error' for this, as we're done with this 4917 // statement and don't need to match the 'instruction." 4918 return true; 4919 } 4920 4921 // Create the leading tokens for the mnemonic, split by '.' characters. 4922 size_t Start = 0, Next = Name.find('.'); 4923 StringRef Mnemonic = Name.slice(Start, Next); 4924 4925 // Split out the predication code and carry setting flag from the mnemonic. 4926 unsigned PredicationCode; 4927 unsigned ProcessorIMod; 4928 bool CarrySetting; 4929 StringRef ITMask; 4930 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 4931 ProcessorIMod, ITMask); 4932 4933 // In Thumb1, only the branch (B) instruction can be predicated. 
4934 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 4935 Parser.EatToEndOfStatement(); 4936 return Error(NameLoc, "conditional execution not supported in Thumb1"); 4937 } 4938 4939 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 4940 4941 // Handle the IT instruction ITMask. Convert it to a bitmask. This 4942 // is the mask as it will be for the IT encoding if the conditional 4943 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 4944 // where the conditional bit0 is zero, the instruction post-processing 4945 // will adjust the mask accordingly. 4946 if (Mnemonic == "it") { 4947 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 4948 if (ITMask.size() > 3) { 4949 Parser.EatToEndOfStatement(); 4950 return Error(Loc, "too many conditions on IT instruction"); 4951 } 4952 unsigned Mask = 8; 4953 for (unsigned i = ITMask.size(); i != 0; --i) { 4954 char pos = ITMask[i - 1]; 4955 if (pos != 't' && pos != 'e') { 4956 Parser.EatToEndOfStatement(); 4957 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 4958 } 4959 Mask >>= 1; 4960 if (ITMask[i - 1] == 't') 4961 Mask |= 8; 4962 } 4963 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 4964 } 4965 4966 // FIXME: This is all a pretty gross hack. We should automatically handle 4967 // optional operands like this via tblgen. 4968 4969 // Next, add the CCOut and ConditionCode operands, if needed. 4970 // 4971 // For mnemonics which can ever incorporate a carry setting bit or predication 4972 // code, our matching model involves us always generating CCOut and 4973 // ConditionCode operands to match the mnemonic "as written" and then we let 4974 // the matcher deal with finding the right instruction or generating an 4975 // appropriate error. 
4976 bool CanAcceptCarrySet, CanAcceptPredicationCode; 4977 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode); 4978 4979 // If we had a carry-set on an instruction that can't do that, issue an 4980 // error. 4981 if (!CanAcceptCarrySet && CarrySetting) { 4982 Parser.EatToEndOfStatement(); 4983 return Error(NameLoc, "instruction '" + Mnemonic + 4984 "' can not set flags, but 's' suffix specified"); 4985 } 4986 // If we had a predication code on an instruction that can't do that, issue an 4987 // error. 4988 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 4989 Parser.EatToEndOfStatement(); 4990 return Error(NameLoc, "instruction '" + Mnemonic + 4991 "' is not predicable, but condition code specified"); 4992 } 4993 4994 // Add the carry setting operand, if necessary. 4995 if (CanAcceptCarrySet) { 4996 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 4997 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 4998 Loc)); 4999 } 5000 5001 // Add the predication code operand, if necessary. 5002 if (CanAcceptPredicationCode) { 5003 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 5004 CarrySetting); 5005 Operands.push_back(ARMOperand::CreateCondCode( 5006 ARMCC::CondCodes(PredicationCode), Loc)); 5007 } 5008 5009 // Add the processor imod operand, if necessary. 5010 if (ProcessorIMod) { 5011 Operands.push_back(ARMOperand::CreateImm( 5012 MCConstantExpr::Create(ProcessorIMod, getContext()), 5013 NameLoc, NameLoc)); 5014 } 5015 5016 // Add the remaining tokens in the mnemonic. 5017 while (Next != StringRef::npos) { 5018 Start = Next; 5019 Next = Name.find('.', Start + 1); 5020 StringRef ExtraToken = Name.slice(Start, Next); 5021 5022 // Some NEON instructions have an optional datatype suffix that is 5023 // completely ignored. Check for that. 
5024 if (isDataTypeToken(ExtraToken) && 5025 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken)) 5026 continue; 5027 5028 if (ExtraToken != ".n") { 5029 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 5030 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 5031 } 5032 } 5033 5034 // Read the remaining operands. 5035 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5036 // Read the first operand. 5037 if (parseOperand(Operands, Mnemonic)) { 5038 Parser.EatToEndOfStatement(); 5039 return true; 5040 } 5041 5042 while (getLexer().is(AsmToken::Comma)) { 5043 Parser.Lex(); // Eat the comma. 5044 5045 // Parse and remember the operand. 5046 if (parseOperand(Operands, Mnemonic)) { 5047 Parser.EatToEndOfStatement(); 5048 return true; 5049 } 5050 } 5051 } 5052 5053 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5054 SMLoc Loc = getLexer().getLoc(); 5055 Parser.EatToEndOfStatement(); 5056 return Error(Loc, "unexpected token in argument list"); 5057 } 5058 5059 Parser.Lex(); // Consume the EndOfStatement 5060 5061 // Some instructions, mostly Thumb, have forms for the same mnemonic that 5062 // do and don't have a cc_out optional-def operand. With some spot-checks 5063 // of the operand list, we can figure out which variant we're trying to 5064 // parse and adjust accordingly before actually matching. We shouldn't ever 5065 // try to remove a cc_out operand that was explicitly set on the the 5066 // mnemonic, of course (CarrySetting == true). Reason number #317 the 5067 // table driven matcher doesn't fit well with the ARM instruction set. 5068 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { 5069 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 5070 Operands.erase(Operands.begin() + 1); 5071 delete Op; 5072 } 5073 5074 // ARM mode 'blx' need special handling, as the register operand version 5075 // is predicable, but the label operand version is not. 
So, we can't rely 5076 // on the Mnemonic based checking to correctly figure out when to put 5077 // a k_CondCode operand in the list. If we're trying to match the label 5078 // version, remove the k_CondCode operand here. 5079 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && 5080 static_cast<ARMOperand*>(Operands[2])->isImm()) { 5081 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 5082 Operands.erase(Operands.begin() + 1); 5083 delete Op; 5084 } 5085 5086 // The vector-compare-to-zero instructions have a literal token "#0" at 5087 // the end that comes to here as an immediate operand. Convert it to a 5088 // token to play nicely with the matcher. 5089 if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" || 5090 Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 && 5091 static_cast<ARMOperand*>(Operands[5])->isImm()) { 5092 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 5093 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 5094 if (CE && CE->getValue() == 0) { 5095 Operands.erase(Operands.begin() + 5); 5096 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 5097 delete Op; 5098 } 5099 } 5100 // VCMP{E} does the same thing, but with a different operand count. 5101 if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 && 5102 static_cast<ARMOperand*>(Operands[4])->isImm()) { 5103 ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]); 5104 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 5105 if (CE && CE->getValue() == 0) { 5106 Operands.erase(Operands.begin() + 4); 5107 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 5108 delete Op; 5109 } 5110 } 5111 // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the 5112 // end. Convert it to a token here. Take care not to convert those 5113 // that should hit the Thumb2 encoding. 
5114 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 && 5115 static_cast<ARMOperand*>(Operands[3])->isReg() && 5116 static_cast<ARMOperand*>(Operands[4])->isReg() && 5117 static_cast<ARMOperand*>(Operands[5])->isImm()) { 5118 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 5119 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 5120 if (CE && CE->getValue() == 0 && 5121 (isThumbOne() || 5122 // The cc_out operand matches the IT block. 5123 ((inITBlock() != CarrySetting) && 5124 // Neither register operand is a high register. 5125 (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 5126 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){ 5127 Operands.erase(Operands.begin() + 5); 5128 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 5129 delete Op; 5130 } 5131 } 5132 5133 return false; 5134} 5135 5136// Validate context-sensitive operand constraints. 5137 5138// return 'true' if register list contains non-low GPR registers, 5139// 'false' otherwise. If Reg is in the register list or is HiReg, set 5140// 'containsReg' to true. 5141static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 5142 unsigned HiReg, bool &containsReg) { 5143 containsReg = false; 5144 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 5145 unsigned OpReg = Inst.getOperand(i).getReg(); 5146 if (OpReg == Reg) 5147 containsReg = true; 5148 // Anything other than a low register isn't legal here. 5149 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 5150 return true; 5151 } 5152 return false; 5153} 5154 5155// Check if the specified regisgter is in the register list of the inst, 5156// starting at the indicated operand number. 
5157static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 5158 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 5159 unsigned OpReg = Inst.getOperand(i).getReg(); 5160 if (OpReg == Reg) 5161 return true; 5162 } 5163 return false; 5164} 5165 5166// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 5167// the ARMInsts array) instead. Getting that here requires awkward 5168// API changes, though. Better way? 5169namespace llvm { 5170extern const MCInstrDesc ARMInsts[]; 5171} 5172static const MCInstrDesc &getInstDesc(unsigned Opcode) { 5173 return ARMInsts[Opcode]; 5174} 5175 5176// FIXME: We would really like to be able to tablegen'erate this. 5177bool ARMAsmParser:: 5178validateInstruction(MCInst &Inst, 5179 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 5180 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 5181 SMLoc Loc = Operands[0]->getStartLoc(); 5182 // Check the IT block state first. 5183 // NOTE: BKPT instruction has the interesting property of being 5184 // allowed in IT blocks, but not being predicable. It just always 5185 // executes. 5186 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT && 5187 Inst.getOpcode() != ARM::BKPT) { 5188 unsigned bit = 1; 5189 if (ITState.FirstCond) 5190 ITState.FirstCond = false; 5191 else 5192 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 5193 // The instruction must be predicable. 5194 if (!MCID.isPredicable()) 5195 return Error(Loc, "instructions in IT block must be predicable"); 5196 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 5197 unsigned ITCond = bit ? ITState.Cond : 5198 ARMCC::getOppositeCondition(ITState.Cond); 5199 if (Cond != ITCond) { 5200 // Find the condition code Operand to get its SMLoc information. 
5201 SMLoc CondLoc; 5202 for (unsigned i = 1; i < Operands.size(); ++i) 5203 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 5204 CondLoc = Operands[i]->getStartLoc(); 5205 return Error(CondLoc, "incorrect condition in IT block; got '" + 5206 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 5207 "', but expected '" + 5208 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 5209 } 5210 // Check for non-'al' condition codes outside of the IT block. 5211 } else if (isThumbTwo() && MCID.isPredicable() && 5212 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 5213 ARMCC::AL && Inst.getOpcode() != ARM::tB && 5214 Inst.getOpcode() != ARM::t2B) 5215 return Error(Loc, "predicated instructions must be in IT block"); 5216 5217 switch (Inst.getOpcode()) { 5218 case ARM::LDRD: 5219 case ARM::LDRD_PRE: 5220 case ARM::LDRD_POST: 5221 case ARM::LDREXD: { 5222 // Rt2 must be Rt + 1. 5223 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 5224 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 5225 if (Rt2 != Rt + 1) 5226 return Error(Operands[3]->getStartLoc(), 5227 "destination operands must be sequential"); 5228 return false; 5229 } 5230 case ARM::STRD: { 5231 // Rt2 must be Rt + 1. 5232 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 5233 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 5234 if (Rt2 != Rt + 1) 5235 return Error(Operands[3]->getStartLoc(), 5236 "source operands must be sequential"); 5237 return false; 5238 } 5239 case ARM::STRD_PRE: 5240 case ARM::STRD_POST: 5241 case ARM::STREXD: { 5242 // Rt2 must be Rt + 1. 
5243 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 5244 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 5245 if (Rt2 != Rt + 1) 5246 return Error(Operands[3]->getStartLoc(), 5247 "source operands must be sequential"); 5248 return false; 5249 } 5250 case ARM::SBFX: 5251 case ARM::UBFX: { 5252 // width must be in range [1, 32-lsb] 5253 unsigned lsb = Inst.getOperand(2).getImm(); 5254 unsigned widthm1 = Inst.getOperand(3).getImm(); 5255 if (widthm1 >= 32 - lsb) 5256 return Error(Operands[5]->getStartLoc(), 5257 "bitfield width must be in range [1,32-lsb]"); 5258 return false; 5259 } 5260 case ARM::tLDMIA: { 5261 // If we're parsing Thumb2, the .w variant is available and handles 5262 // most cases that are normally illegal for a Thumb1 LDM 5263 // instruction. We'll make the transformation in processInstruction() 5264 // if necessary. 5265 // 5266 // Thumb LDM instructions are writeback iff the base register is not 5267 // in the register list. 5268 unsigned Rn = Inst.getOperand(0).getReg(); 5269 bool hasWritebackToken = 5270 (static_cast<ARMOperand*>(Operands[3])->isToken() && 5271 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 5272 bool listContainsBase; 5273 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 5274 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 5275 "registers must be in range r0-r7"); 5276 // If we should have writeback, then there should be a '!' token. 5277 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 5278 return Error(Operands[2]->getStartLoc(), 5279 "writeback operator '!' expected"); 5280 // If we should not have writeback, there must not be a '!'. This is 5281 // true even for the 32-bit wide encodings. 5282 if (listContainsBase && hasWritebackToken) 5283 return Error(Operands[3]->getStartLoc(), 5284 "writeback operator '!' 
not allowed when base register " 5285 "in register list"); 5286 5287 break; 5288 } 5289 case ARM::t2LDMIA_UPD: { 5290 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 5291 return Error(Operands[4]->getStartLoc(), 5292 "writeback operator '!' not allowed when base register " 5293 "in register list"); 5294 break; 5295 } 5296 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 5297 // so only issue a diagnostic for thumb1. The instructions will be 5298 // switched to the t2 encodings in processInstruction() if necessary. 5299 case ARM::tPOP: { 5300 bool listContainsBase; 5301 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) && 5302 !isThumbTwo()) 5303 return Error(Operands[2]->getStartLoc(), 5304 "registers must be in range r0-r7 or pc"); 5305 break; 5306 } 5307 case ARM::tPUSH: { 5308 bool listContainsBase; 5309 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) && 5310 !isThumbTwo()) 5311 return Error(Operands[2]->getStartLoc(), 5312 "registers must be in range r0-r7 or lr"); 5313 break; 5314 } 5315 case ARM::tSTMIA_UPD: { 5316 bool listContainsBase; 5317 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 5318 return Error(Operands[4]->getStartLoc(), 5319 "registers must be in range r0-r7"); 5320 break; 5321 } 5322 case ARM::tADDrSP: { 5323 // If the non-SP source operand and the destination operand are not the 5324 // same, we need thumb2 (for the wide encoding), or we have an error. 
5325 if (!isThumbTwo() && 5326 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { 5327 return Error(Operands[4]->getStartLoc(), 5328 "source register must be the same as destination"); 5329 } 5330 break; 5331 } 5332 } 5333 5334 return false; 5335} 5336 5337static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) { 5338 switch(Opc) { 5339 default: llvm_unreachable("unexpected opcode!"); 5340 // VST1LN 5341 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; 5342 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; 5343 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; 5344 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; 5345 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; 5346 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; 5347 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8; 5348 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16; 5349 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32; 5350 5351 // VST2LN 5352 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 5353 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 5354 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 5355 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 5356 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 5357 5358 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 5359 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 5360 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 5361 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 5362 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 5363 5364 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8; 5365 
case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16; 5366 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32; 5367 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16; 5368 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32; 5369 5370 // VST3LN 5371 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 5372 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 5373 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; 5374 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD; 5375 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 5376 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 5377 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 5378 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; 5379 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD; 5380 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 5381 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8; 5382 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16; 5383 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32; 5384 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16; 5385 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32; 5386 5387 // VST3 5388 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 5389 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; 5390 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 5391 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 5392 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 5393 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 5394 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 5395 case ARM::VST3dWB_register_Asm_16: Spacing = 
1; return ARM::VST3d16_UPD; 5396 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 5397 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 5398 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 5399 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 5400 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8; 5401 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16; 5402 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32; 5403 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8; 5404 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16; 5405 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32; 5406 5407 // VST4LN 5408 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 5409 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 5410 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 5411 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD; 5412 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 5413 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 5414 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 5415 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 5416 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD; 5417 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 5418 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8; 5419 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16; 5420 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32; 5421 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16; 5422 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32; 5423 5424 // VST4 5425 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 5426 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; 
return ARM::VST4d16_UPD; 5427 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 5428 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 5429 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 5430 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 5431 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 5432 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; 5433 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 5434 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 5435 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 5436 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 5437 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8; 5438 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16; 5439 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32; 5440 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8; 5441 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16; 5442 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32; 5443 } 5444} 5445 5446static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) { 5447 switch(Opc) { 5448 default: llvm_unreachable("unexpected opcode!"); 5449 // VLD1LN 5450 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 5451 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 5452 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 5453 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 5454 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 5455 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 5456 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8; 5457 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16; 5458 case ARM::VLD1LNdAsm_32: Spacing = 1; return 
ARM::VLD1LNd32; 5459 5460 // VLD2LN 5461 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 5462 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 5463 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 5464 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD; 5465 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 5466 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 5467 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 5468 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 5469 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD; 5470 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 5471 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8; 5472 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16; 5473 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32; 5474 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16; 5475 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32; 5476 5477 // VLD3DUP 5478 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 5479 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 5480 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; 5481 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD; 5482 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD; 5483 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 5484 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 5485 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 5486 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; 5487 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD; 
5488 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD; 5489 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 5490 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8; 5491 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16; 5492 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32; 5493 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8; 5494 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16; 5495 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32; 5496 5497 // VLD3LN 5498 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 5499 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 5500 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 5501 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD; 5502 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 5503 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 5504 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 5505 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 5506 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD; 5507 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 5508 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8; 5509 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16; 5510 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32; 5511 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16; 5512 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32; 5513 5514 // VLD3 5515 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 5516 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 5517 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 5518 case ARM::VLD3qWB_fixed_Asm_8: 
Spacing = 2; return ARM::VLD3q8_UPD; 5519 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 5520 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 5521 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 5522 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 5523 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 5524 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; 5525 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 5526 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 5527 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8; 5528 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16; 5529 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32; 5530 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8; 5531 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16; 5532 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32; 5533 5534 // VLD4LN 5535 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 5536 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 5537 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 5538 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNq16_UPD; 5539 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; 5540 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 5541 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 5542 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 5543 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD; 5544 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; 5545 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8; 5546 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16; 5547 case ARM::VLD4LNdAsm_32: 
Spacing = 1; return ARM::VLD4LNd32; 5548 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16; 5549 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32; 5550 5551 // VLD4DUP 5552 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 5553 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 5554 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; 5555 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD; 5556 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD; 5557 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 5558 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 5559 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 5560 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; 5561 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD; 5562 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD; 5563 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 5564 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8; 5565 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16; 5566 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32; 5567 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8; 5568 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16; 5569 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32; 5570 5571 // VLD4 5572 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 5573 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 5574 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 5575 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 5576 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 5577 case 
ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; 5578 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 5579 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 5580 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 5581 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 5582 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 5583 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; 5584 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8; 5585 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16; 5586 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32; 5587 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8; 5588 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16; 5589 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32; 5590 } 5591} 5592 5593bool ARMAsmParser:: 5594processInstruction(MCInst &Inst, 5595 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 5596 switch (Inst.getOpcode()) { 5597 // Aliases for alternate PC+imm syntax of LDR instructions. 5598 case ARM::t2LDRpcrel: 5599 Inst.setOpcode(ARM::t2LDRpci); 5600 return true; 5601 case ARM::t2LDRBpcrel: 5602 Inst.setOpcode(ARM::t2LDRBpci); 5603 return true; 5604 case ARM::t2LDRHpcrel: 5605 Inst.setOpcode(ARM::t2LDRHpci); 5606 return true; 5607 case ARM::t2LDRSBpcrel: 5608 Inst.setOpcode(ARM::t2LDRSBpci); 5609 return true; 5610 case ARM::t2LDRSHpcrel: 5611 Inst.setOpcode(ARM::t2LDRSHpci); 5612 return true; 5613 // Handle NEON VST complex aliases. 5614 case ARM::VST1LNdWB_register_Asm_8: 5615 case ARM::VST1LNdWB_register_Asm_16: 5616 case ARM::VST1LNdWB_register_Asm_32: { 5617 MCInst TmpInst; 5618 // Shuffle the operands around so the lane index operand is in the 5619 // right place. 
5620 unsigned Spacing; 5621 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5622 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5623 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5624 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5625 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5626 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5627 TmpInst.addOperand(Inst.getOperand(1)); // lane 5628 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5629 TmpInst.addOperand(Inst.getOperand(6)); 5630 Inst = TmpInst; 5631 return true; 5632 } 5633 5634 case ARM::VST2LNdWB_register_Asm_8: 5635 case ARM::VST2LNdWB_register_Asm_16: 5636 case ARM::VST2LNdWB_register_Asm_32: 5637 case ARM::VST2LNqWB_register_Asm_16: 5638 case ARM::VST2LNqWB_register_Asm_32: { 5639 MCInst TmpInst; 5640 // Shuffle the operands around so the lane index operand is in the 5641 // right place. 5642 unsigned Spacing; 5643 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5644 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5645 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5646 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5647 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5648 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5649 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5650 Spacing)); 5651 TmpInst.addOperand(Inst.getOperand(1)); // lane 5652 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5653 TmpInst.addOperand(Inst.getOperand(6)); 5654 Inst = TmpInst; 5655 return true; 5656 } 5657 5658 case ARM::VST3LNdWB_register_Asm_8: 5659 case ARM::VST3LNdWB_register_Asm_16: 5660 case ARM::VST3LNdWB_register_Asm_32: 5661 case ARM::VST3LNqWB_register_Asm_16: 5662 case ARM::VST3LNqWB_register_Asm_32: { 5663 MCInst TmpInst; 5664 // Shuffle the operands around so the lane index operand is in the 5665 // right place. 
5666 unsigned Spacing; 5667 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5668 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5669 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5670 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5671 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5672 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5673 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5674 Spacing)); 5675 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5676 Spacing * 2)); 5677 TmpInst.addOperand(Inst.getOperand(1)); // lane 5678 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5679 TmpInst.addOperand(Inst.getOperand(6)); 5680 Inst = TmpInst; 5681 return true; 5682 } 5683 5684 case ARM::VST4LNdWB_register_Asm_8: 5685 case ARM::VST4LNdWB_register_Asm_16: 5686 case ARM::VST4LNdWB_register_Asm_32: 5687 case ARM::VST4LNqWB_register_Asm_16: 5688 case ARM::VST4LNqWB_register_Asm_32: { 5689 MCInst TmpInst; 5690 // Shuffle the operands around so the lane index operand is in the 5691 // right place. 
5692 unsigned Spacing; 5693 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5694 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5695 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5696 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5697 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5698 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5699 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5700 Spacing)); 5701 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5702 Spacing * 2)); 5703 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5704 Spacing * 3)); 5705 TmpInst.addOperand(Inst.getOperand(1)); // lane 5706 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5707 TmpInst.addOperand(Inst.getOperand(6)); 5708 Inst = TmpInst; 5709 return true; 5710 } 5711 5712 case ARM::VST1LNdWB_fixed_Asm_8: 5713 case ARM::VST1LNdWB_fixed_Asm_16: 5714 case ARM::VST1LNdWB_fixed_Asm_32: { 5715 MCInst TmpInst; 5716 // Shuffle the operands around so the lane index operand is in the 5717 // right place. 5718 unsigned Spacing; 5719 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5720 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5721 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5722 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5723 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5724 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5725 TmpInst.addOperand(Inst.getOperand(1)); // lane 5726 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5727 TmpInst.addOperand(Inst.getOperand(5)); 5728 Inst = TmpInst; 5729 return true; 5730 } 5731 5732 case ARM::VST2LNdWB_fixed_Asm_8: 5733 case ARM::VST2LNdWB_fixed_Asm_16: 5734 case ARM::VST2LNdWB_fixed_Asm_32: 5735 case ARM::VST2LNqWB_fixed_Asm_16: 5736 case ARM::VST2LNqWB_fixed_Asm_32: { 5737 MCInst TmpInst; 5738 // Shuffle the operands around so the lane index operand is in the 5739 // right place. 
5740 unsigned Spacing; 5741 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5742 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5743 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5744 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5745 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5746 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5747 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5748 Spacing)); 5749 TmpInst.addOperand(Inst.getOperand(1)); // lane 5750 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5751 TmpInst.addOperand(Inst.getOperand(5)); 5752 Inst = TmpInst; 5753 return true; 5754 } 5755 5756 case ARM::VST3LNdWB_fixed_Asm_8: 5757 case ARM::VST3LNdWB_fixed_Asm_16: 5758 case ARM::VST3LNdWB_fixed_Asm_32: 5759 case ARM::VST3LNqWB_fixed_Asm_16: 5760 case ARM::VST3LNqWB_fixed_Asm_32: { 5761 MCInst TmpInst; 5762 // Shuffle the operands around so the lane index operand is in the 5763 // right place. 5764 unsigned Spacing; 5765 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5766 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5767 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5768 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5769 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5770 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5771 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5772 Spacing)); 5773 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5774 Spacing * 2)); 5775 TmpInst.addOperand(Inst.getOperand(1)); // lane 5776 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5777 TmpInst.addOperand(Inst.getOperand(5)); 5778 Inst = TmpInst; 5779 return true; 5780 } 5781 5782 case ARM::VST4LNdWB_fixed_Asm_8: 5783 case ARM::VST4LNdWB_fixed_Asm_16: 5784 case ARM::VST4LNdWB_fixed_Asm_32: 5785 case ARM::VST4LNqWB_fixed_Asm_16: 5786 case ARM::VST4LNqWB_fixed_Asm_32: { 5787 MCInst TmpInst; 5788 // Shuffle the operands around so the lane index 
operand is in the 5789 // right place. 5790 unsigned Spacing; 5791 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5792 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5793 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5794 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5795 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5796 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5797 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5798 Spacing)); 5799 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5800 Spacing * 2)); 5801 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5802 Spacing * 3)); 5803 TmpInst.addOperand(Inst.getOperand(1)); // lane 5804 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5805 TmpInst.addOperand(Inst.getOperand(5)); 5806 Inst = TmpInst; 5807 return true; 5808 } 5809 5810 case ARM::VST1LNdAsm_8: 5811 case ARM::VST1LNdAsm_16: 5812 case ARM::VST1LNdAsm_32: { 5813 MCInst TmpInst; 5814 // Shuffle the operands around so the lane index operand is in the 5815 // right place. 5816 unsigned Spacing; 5817 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5818 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5819 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5820 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5821 TmpInst.addOperand(Inst.getOperand(1)); // lane 5822 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5823 TmpInst.addOperand(Inst.getOperand(5)); 5824 Inst = TmpInst; 5825 return true; 5826 } 5827 5828 case ARM::VST2LNdAsm_8: 5829 case ARM::VST2LNdAsm_16: 5830 case ARM::VST2LNdAsm_32: 5831 case ARM::VST2LNqAsm_16: 5832 case ARM::VST2LNqAsm_32: { 5833 MCInst TmpInst; 5834 // Shuffle the operands around so the lane index operand is in the 5835 // right place. 
5836 unsigned Spacing; 5837 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5838 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5839 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5840 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5841 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5842 Spacing)); 5843 TmpInst.addOperand(Inst.getOperand(1)); // lane 5844 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5845 TmpInst.addOperand(Inst.getOperand(5)); 5846 Inst = TmpInst; 5847 return true; 5848 } 5849 5850 case ARM::VST3LNdAsm_8: 5851 case ARM::VST3LNdAsm_16: 5852 case ARM::VST3LNdAsm_32: 5853 case ARM::VST3LNqAsm_16: 5854 case ARM::VST3LNqAsm_32: { 5855 MCInst TmpInst; 5856 // Shuffle the operands around so the lane index operand is in the 5857 // right place. 5858 unsigned Spacing; 5859 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5860 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5861 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5862 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5863 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5864 Spacing)); 5865 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5866 Spacing * 2)); 5867 TmpInst.addOperand(Inst.getOperand(1)); // lane 5868 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5869 TmpInst.addOperand(Inst.getOperand(5)); 5870 Inst = TmpInst; 5871 return true; 5872 } 5873 5874 case ARM::VST4LNdAsm_8: 5875 case ARM::VST4LNdAsm_16: 5876 case ARM::VST4LNdAsm_32: 5877 case ARM::VST4LNqAsm_16: 5878 case ARM::VST4LNqAsm_32: { 5879 MCInst TmpInst; 5880 // Shuffle the operands around so the lane index operand is in the 5881 // right place. 
5882 unsigned Spacing; 5883 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5884 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5885 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5886 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5887 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5888 Spacing)); 5889 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5890 Spacing * 2)); 5891 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5892 Spacing * 3)); 5893 TmpInst.addOperand(Inst.getOperand(1)); // lane 5894 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5895 TmpInst.addOperand(Inst.getOperand(5)); 5896 Inst = TmpInst; 5897 return true; 5898 } 5899 5900 // Handle NEON VLD complex aliases. 5901 case ARM::VLD1LNdWB_register_Asm_8: 5902 case ARM::VLD1LNdWB_register_Asm_16: 5903 case ARM::VLD1LNdWB_register_Asm_32: { 5904 MCInst TmpInst; 5905 // Shuffle the operands around so the lane index operand is in the 5906 // right place. 5907 unsigned Spacing; 5908 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 5909 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5910 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5911 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5912 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5913 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5914 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5915 TmpInst.addOperand(Inst.getOperand(1)); // lane 5916 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5917 TmpInst.addOperand(Inst.getOperand(6)); 5918 Inst = TmpInst; 5919 return true; 5920 } 5921 5922 case ARM::VLD2LNdWB_register_Asm_8: 5923 case ARM::VLD2LNdWB_register_Asm_16: 5924 case ARM::VLD2LNdWB_register_Asm_32: 5925 case ARM::VLD2LNqWB_register_Asm_16: 5926 case ARM::VLD2LNqWB_register_Asm_32: { 5927 MCInst TmpInst; 5928 // Shuffle the operands around so the lane index operand is in the 5929 // right place. 
5930 unsigned Spacing; 5931 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 5932 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5933 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5934 Spacing)); 5935 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5936 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5937 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5938 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5939 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5940 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5941 Spacing)); 5942 TmpInst.addOperand(Inst.getOperand(1)); // lane 5943 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5944 TmpInst.addOperand(Inst.getOperand(6)); 5945 Inst = TmpInst; 5946 return true; 5947 } 5948 5949 case ARM::VLD3LNdWB_register_Asm_8: 5950 case ARM::VLD3LNdWB_register_Asm_16: 5951 case ARM::VLD3LNdWB_register_Asm_32: 5952 case ARM::VLD3LNqWB_register_Asm_16: 5953 case ARM::VLD3LNqWB_register_Asm_32: { 5954 MCInst TmpInst; 5955 // Shuffle the operands around so the lane index operand is in the 5956 // right place. 
5957 unsigned Spacing; 5958 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 5959 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5960 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5961 Spacing)); 5962 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5963 Spacing * 2)); 5964 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5965 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5966 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5967 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5968 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5969 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5970 Spacing)); 5971 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5972 Spacing * 2)); 5973 TmpInst.addOperand(Inst.getOperand(1)); // lane 5974 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5975 TmpInst.addOperand(Inst.getOperand(6)); 5976 Inst = TmpInst; 5977 return true; 5978 } 5979 5980 case ARM::VLD4LNdWB_register_Asm_8: 5981 case ARM::VLD4LNdWB_register_Asm_16: 5982 case ARM::VLD4LNdWB_register_Asm_32: 5983 case ARM::VLD4LNqWB_register_Asm_16: 5984 case ARM::VLD4LNqWB_register_Asm_32: { 5985 MCInst TmpInst; 5986 // Shuffle the operands around so the lane index operand is in the 5987 // right place. 
5988 unsigned Spacing; 5989 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 5990 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5991 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5992 Spacing)); 5993 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5994 Spacing * 2)); 5995 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5996 Spacing * 3)); 5997 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5998 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5999 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6000 TmpInst.addOperand(Inst.getOperand(4)); // Rm 6001 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6002 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6003 Spacing)); 6004 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6005 Spacing * 2)); 6006 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6007 Spacing * 3)); 6008 TmpInst.addOperand(Inst.getOperand(1)); // lane 6009 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 6010 TmpInst.addOperand(Inst.getOperand(6)); 6011 Inst = TmpInst; 6012 return true; 6013 } 6014 6015 case ARM::VLD1LNdWB_fixed_Asm_8: 6016 case ARM::VLD1LNdWB_fixed_Asm_16: 6017 case ARM::VLD1LNdWB_fixed_Asm_32: { 6018 MCInst TmpInst; 6019 // Shuffle the operands around so the lane index operand is in the 6020 // right place. 
6021 unsigned Spacing; 6022 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6023 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6024 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6025 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6026 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6027 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6028 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6029 TmpInst.addOperand(Inst.getOperand(1)); // lane 6030 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6031 TmpInst.addOperand(Inst.getOperand(5)); 6032 Inst = TmpInst; 6033 return true; 6034 } 6035 6036 case ARM::VLD2LNdWB_fixed_Asm_8: 6037 case ARM::VLD2LNdWB_fixed_Asm_16: 6038 case ARM::VLD2LNdWB_fixed_Asm_32: 6039 case ARM::VLD2LNqWB_fixed_Asm_16: 6040 case ARM::VLD2LNqWB_fixed_Asm_32: { 6041 MCInst TmpInst; 6042 // Shuffle the operands around so the lane index operand is in the 6043 // right place. 6044 unsigned Spacing; 6045 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6046 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6047 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6048 Spacing)); 6049 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6050 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6051 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6052 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6053 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6054 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6055 Spacing)); 6056 TmpInst.addOperand(Inst.getOperand(1)); // lane 6057 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6058 TmpInst.addOperand(Inst.getOperand(5)); 6059 Inst = TmpInst; 6060 return true; 6061 } 6062 6063 case ARM::VLD3LNdWB_fixed_Asm_8: 6064 case ARM::VLD3LNdWB_fixed_Asm_16: 6065 case ARM::VLD3LNdWB_fixed_Asm_32: 6066 case ARM::VLD3LNqWB_fixed_Asm_16: 6067 case ARM::VLD3LNqWB_fixed_Asm_32: { 6068 MCInst TmpInst; 6069 // 
Shuffle the operands around so the lane index operand is in the 6070 // right place. 6071 unsigned Spacing; 6072 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6073 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6074 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6075 Spacing)); 6076 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6077 Spacing * 2)); 6078 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6079 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6080 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6081 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6082 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6083 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6084 Spacing)); 6085 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6086 Spacing * 2)); 6087 TmpInst.addOperand(Inst.getOperand(1)); // lane 6088 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6089 TmpInst.addOperand(Inst.getOperand(5)); 6090 Inst = TmpInst; 6091 return true; 6092 } 6093 6094 case ARM::VLD4LNdWB_fixed_Asm_8: 6095 case ARM::VLD4LNdWB_fixed_Asm_16: 6096 case ARM::VLD4LNdWB_fixed_Asm_32: 6097 case ARM::VLD4LNqWB_fixed_Asm_16: 6098 case ARM::VLD4LNqWB_fixed_Asm_32: { 6099 MCInst TmpInst; 6100 // Shuffle the operands around so the lane index operand is in the 6101 // right place. 
6102 unsigned Spacing; 6103 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6104 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6105 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6106 Spacing)); 6107 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6108 Spacing * 2)); 6109 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6110 Spacing * 3)); 6111 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6112 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6113 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6114 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6115 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6116 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6117 Spacing)); 6118 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6119 Spacing * 2)); 6120 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6121 Spacing * 3)); 6122 TmpInst.addOperand(Inst.getOperand(1)); // lane 6123 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6124 TmpInst.addOperand(Inst.getOperand(5)); 6125 Inst = TmpInst; 6126 return true; 6127 } 6128 6129 case ARM::VLD1LNdAsm_8: 6130 case ARM::VLD1LNdAsm_16: 6131 case ARM::VLD1LNdAsm_32: { 6132 MCInst TmpInst; 6133 // Shuffle the operands around so the lane index operand is in the 6134 // right place. 
6135 unsigned Spacing; 6136 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6137 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6138 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6139 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6140 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6141 TmpInst.addOperand(Inst.getOperand(1)); // lane 6142 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6143 TmpInst.addOperand(Inst.getOperand(5)); 6144 Inst = TmpInst; 6145 return true; 6146 } 6147 6148 case ARM::VLD2LNdAsm_8: 6149 case ARM::VLD2LNdAsm_16: 6150 case ARM::VLD2LNdAsm_32: 6151 case ARM::VLD2LNqAsm_16: 6152 case ARM::VLD2LNqAsm_32: { 6153 MCInst TmpInst; 6154 // Shuffle the operands around so the lane index operand is in the 6155 // right place. 6156 unsigned Spacing; 6157 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6158 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6159 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6160 Spacing)); 6161 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6162 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6163 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6164 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6165 Spacing)); 6166 TmpInst.addOperand(Inst.getOperand(1)); // lane 6167 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6168 TmpInst.addOperand(Inst.getOperand(5)); 6169 Inst = TmpInst; 6170 return true; 6171 } 6172 6173 case ARM::VLD3LNdAsm_8: 6174 case ARM::VLD3LNdAsm_16: 6175 case ARM::VLD3LNdAsm_32: 6176 case ARM::VLD3LNqAsm_16: 6177 case ARM::VLD3LNqAsm_32: { 6178 MCInst TmpInst; 6179 // Shuffle the operands around so the lane index operand is in the 6180 // right place. 
6181 unsigned Spacing; 6182 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6183 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6184 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6185 Spacing)); 6186 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6187 Spacing * 2)); 6188 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6189 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6190 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6191 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6192 Spacing)); 6193 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6194 Spacing * 2)); 6195 TmpInst.addOperand(Inst.getOperand(1)); // lane 6196 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6197 TmpInst.addOperand(Inst.getOperand(5)); 6198 Inst = TmpInst; 6199 return true; 6200 } 6201 6202 case ARM::VLD4LNdAsm_8: 6203 case ARM::VLD4LNdAsm_16: 6204 case ARM::VLD4LNdAsm_32: 6205 case ARM::VLD4LNqAsm_16: 6206 case ARM::VLD4LNqAsm_32: { 6207 MCInst TmpInst; 6208 // Shuffle the operands around so the lane index operand is in the 6209 // right place. 
6210 unsigned Spacing; 6211 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6212 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6213 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6214 Spacing)); 6215 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6216 Spacing * 2)); 6217 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6218 Spacing * 3)); 6219 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6220 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6221 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6222 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6223 Spacing)); 6224 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6225 Spacing * 2)); 6226 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6227 Spacing * 3)); 6228 TmpInst.addOperand(Inst.getOperand(1)); // lane 6229 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6230 TmpInst.addOperand(Inst.getOperand(5)); 6231 Inst = TmpInst; 6232 return true; 6233 } 6234 6235 // VLD3DUP single 3-element structure to all lanes instructions. 
6236 case ARM::VLD3DUPdAsm_8: 6237 case ARM::VLD3DUPdAsm_16: 6238 case ARM::VLD3DUPdAsm_32: 6239 case ARM::VLD3DUPqAsm_8: 6240 case ARM::VLD3DUPqAsm_16: 6241 case ARM::VLD3DUPqAsm_32: { 6242 MCInst TmpInst; 6243 unsigned Spacing; 6244 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6245 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6246 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6247 Spacing)); 6248 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6249 Spacing * 2)); 6250 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6251 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6252 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6253 TmpInst.addOperand(Inst.getOperand(4)); 6254 Inst = TmpInst; 6255 return true; 6256 } 6257 6258 case ARM::VLD3DUPdWB_fixed_Asm_8: 6259 case ARM::VLD3DUPdWB_fixed_Asm_16: 6260 case ARM::VLD3DUPdWB_fixed_Asm_32: 6261 case ARM::VLD3DUPqWB_fixed_Asm_8: 6262 case ARM::VLD3DUPqWB_fixed_Asm_16: 6263 case ARM::VLD3DUPqWB_fixed_Asm_32: { 6264 MCInst TmpInst; 6265 unsigned Spacing; 6266 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6267 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6268 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6269 Spacing)); 6270 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6271 Spacing * 2)); 6272 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6273 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6274 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6275 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6276 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6277 TmpInst.addOperand(Inst.getOperand(4)); 6278 Inst = TmpInst; 6279 return true; 6280 } 6281 6282 case ARM::VLD3DUPdWB_register_Asm_8: 6283 case ARM::VLD3DUPdWB_register_Asm_16: 6284 case ARM::VLD3DUPdWB_register_Asm_32: 6285 case ARM::VLD3DUPqWB_register_Asm_8: 6286 case ARM::VLD3DUPqWB_register_Asm_16: 6287 case 
ARM::VLD3DUPqWB_register_Asm_32: { 6288 MCInst TmpInst; 6289 unsigned Spacing; 6290 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6291 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6292 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6293 Spacing)); 6294 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6295 Spacing * 2)); 6296 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6297 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6298 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6299 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6300 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6301 TmpInst.addOperand(Inst.getOperand(5)); 6302 Inst = TmpInst; 6303 return true; 6304 } 6305 6306 // VLD3 multiple 3-element structure instructions. 6307 case ARM::VLD3dAsm_8: 6308 case ARM::VLD3dAsm_16: 6309 case ARM::VLD3dAsm_32: 6310 case ARM::VLD3qAsm_8: 6311 case ARM::VLD3qAsm_16: 6312 case ARM::VLD3qAsm_32: { 6313 MCInst TmpInst; 6314 unsigned Spacing; 6315 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6316 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6317 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6318 Spacing)); 6319 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6320 Spacing * 2)); 6321 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6322 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6323 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6324 TmpInst.addOperand(Inst.getOperand(4)); 6325 Inst = TmpInst; 6326 return true; 6327 } 6328 6329 case ARM::VLD3dWB_fixed_Asm_8: 6330 case ARM::VLD3dWB_fixed_Asm_16: 6331 case ARM::VLD3dWB_fixed_Asm_32: 6332 case ARM::VLD3qWB_fixed_Asm_8: 6333 case ARM::VLD3qWB_fixed_Asm_16: 6334 case ARM::VLD3qWB_fixed_Asm_32: { 6335 MCInst TmpInst; 6336 unsigned Spacing; 6337 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6338 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6339 
TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6340 Spacing)); 6341 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6342 Spacing * 2)); 6343 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6344 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6345 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6346 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6347 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6348 TmpInst.addOperand(Inst.getOperand(4)); 6349 Inst = TmpInst; 6350 return true; 6351 } 6352 6353 case ARM::VLD3dWB_register_Asm_8: 6354 case ARM::VLD3dWB_register_Asm_16: 6355 case ARM::VLD3dWB_register_Asm_32: 6356 case ARM::VLD3qWB_register_Asm_8: 6357 case ARM::VLD3qWB_register_Asm_16: 6358 case ARM::VLD3qWB_register_Asm_32: { 6359 MCInst TmpInst; 6360 unsigned Spacing; 6361 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6362 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6363 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6364 Spacing)); 6365 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6366 Spacing * 2)); 6367 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6368 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6369 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6370 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6371 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6372 TmpInst.addOperand(Inst.getOperand(5)); 6373 Inst = TmpInst; 6374 return true; 6375 } 6376 6377 // VLD4DUP single 4-element structure to all lanes instructions.
6378 case ARM::VLD4DUPdAsm_8: 6379 case ARM::VLD4DUPdAsm_16: 6380 case ARM::VLD4DUPdAsm_32: 6381 case ARM::VLD4DUPqAsm_8: 6382 case ARM::VLD4DUPqAsm_16: 6383 case ARM::VLD4DUPqAsm_32: { 6384 MCInst TmpInst; 6385 unsigned Spacing; 6386 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6387 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6388 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6389 Spacing)); 6390 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6391 Spacing * 2)); 6392 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6393 Spacing * 3)); 6394 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6395 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6396 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6397 TmpInst.addOperand(Inst.getOperand(4)); 6398 Inst = TmpInst; 6399 return true; 6400 } 6401 6402 case ARM::VLD4DUPdWB_fixed_Asm_8: 6403 case ARM::VLD4DUPdWB_fixed_Asm_16: 6404 case ARM::VLD4DUPdWB_fixed_Asm_32: 6405 case ARM::VLD4DUPqWB_fixed_Asm_8: 6406 case ARM::VLD4DUPqWB_fixed_Asm_16: 6407 case ARM::VLD4DUPqWB_fixed_Asm_32: { 6408 MCInst TmpInst; 6409 unsigned Spacing; 6410 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6411 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6412 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6413 Spacing)); 6414 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6415 Spacing * 2)); 6416 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6417 Spacing * 3)); 6418 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6419 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6420 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6421 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6422 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6423 TmpInst.addOperand(Inst.getOperand(4)); 6424 Inst = TmpInst; 6425 return true; 6426 } 6427 6428 case 
ARM::VLD4DUPdWB_register_Asm_8: 6429 case ARM::VLD4DUPdWB_register_Asm_16: 6430 case ARM::VLD4DUPdWB_register_Asm_32: 6431 case ARM::VLD4DUPqWB_register_Asm_8: 6432 case ARM::VLD4DUPqWB_register_Asm_16: 6433 case ARM::VLD4DUPqWB_register_Asm_32: { 6434 MCInst TmpInst; 6435 unsigned Spacing; 6436 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6437 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6438 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6439 Spacing)); 6440 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6441 Spacing * 2)); 6442 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6443 Spacing * 3)); 6444 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6445 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6446 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6447 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6448 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6449 TmpInst.addOperand(Inst.getOperand(5)); 6450 Inst = TmpInst; 6451 return true; 6452 } 6453 6454 // VLD4 multiple 4-element structure instructions. 
6455 case ARM::VLD4dAsm_8: 6456 case ARM::VLD4dAsm_16: 6457 case ARM::VLD4dAsm_32: 6458 case ARM::VLD4qAsm_8: 6459 case ARM::VLD4qAsm_16: 6460 case ARM::VLD4qAsm_32: { 6461 MCInst TmpInst; 6462 unsigned Spacing; 6463 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6464 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6465 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6466 Spacing)); 6467 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6468 Spacing * 2)); 6469 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6470 Spacing * 3)); 6471 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6472 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6473 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6474 TmpInst.addOperand(Inst.getOperand(4)); 6475 Inst = TmpInst; 6476 return true; 6477 } 6478 6479 case ARM::VLD4dWB_fixed_Asm_8: 6480 case ARM::VLD4dWB_fixed_Asm_16: 6481 case ARM::VLD4dWB_fixed_Asm_32: 6482 case ARM::VLD4qWB_fixed_Asm_8: 6483 case ARM::VLD4qWB_fixed_Asm_16: 6484 case ARM::VLD4qWB_fixed_Asm_32: { 6485 MCInst TmpInst; 6486 unsigned Spacing; 6487 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6488 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6489 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6490 Spacing)); 6491 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6492 Spacing * 2)); 6493 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6494 Spacing * 3)); 6495 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6496 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6497 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6498 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6499 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6500 TmpInst.addOperand(Inst.getOperand(4)); 6501 Inst = TmpInst; 6502 return true; 6503 } 6504 6505 case ARM::VLD4dWB_register_Asm_8: 6506 case 
ARM::VLD4dWB_register_Asm_16: 6507 case ARM::VLD4dWB_register_Asm_32: 6508 case ARM::VLD4qWB_register_Asm_8: 6509 case ARM::VLD4qWB_register_Asm_16: 6510 case ARM::VLD4qWB_register_Asm_32: { 6511 MCInst TmpInst; 6512 unsigned Spacing; 6513 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6514 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6515 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6516 Spacing)); 6517 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6518 Spacing * 2)); 6519 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6520 Spacing * 3)); 6521 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6522 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6523 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6524 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6525 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6526 TmpInst.addOperand(Inst.getOperand(5)); 6527 Inst = TmpInst; 6528 return true; 6529 } 6530 6531 // VST3 multiple 3-element structure instructions. 
6532 case ARM::VST3dAsm_8: 6533 case ARM::VST3dAsm_16: 6534 case ARM::VST3dAsm_32: 6535 case ARM::VST3qAsm_8: 6536 case ARM::VST3qAsm_16: 6537 case ARM::VST3qAsm_32: { 6538 MCInst TmpInst; 6539 unsigned Spacing; 6540 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6541 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6542 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6543 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6544 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6545 Spacing)); 6546 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6547 Spacing * 2)); 6548 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6549 TmpInst.addOperand(Inst.getOperand(4)); 6550 Inst = TmpInst; 6551 return true; 6552 } 6553 6554 case ARM::VST3dWB_fixed_Asm_8: 6555 case ARM::VST3dWB_fixed_Asm_16: 6556 case ARM::VST3dWB_fixed_Asm_32: 6557 case ARM::VST3qWB_fixed_Asm_8: 6558 case ARM::VST3qWB_fixed_Asm_16: 6559 case ARM::VST3qWB_fixed_Asm_32: { 6560 MCInst TmpInst; 6561 unsigned Spacing; 6562 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6563 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6564 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6565 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6566 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6567 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6568 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6569 Spacing)); 6570 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6571 Spacing * 2)); 6572 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6573 TmpInst.addOperand(Inst.getOperand(4)); 6574 Inst = TmpInst; 6575 return true; 6576 } 6577 6578 case ARM::VST3dWB_register_Asm_8: 6579 case ARM::VST3dWB_register_Asm_16: 6580 case ARM::VST3dWB_register_Asm_32: 6581 case ARM::VST3qWB_register_Asm_8: 6582 case ARM::VST3qWB_register_Asm_16: 6583 case ARM::VST3qWB_register_Asm_32: { 6584 MCInst TmpInst; 
6585 unsigned Spacing; 6586 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6587 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6588 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6589 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6590 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6591 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6592 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6593 Spacing)); 6594 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6595 Spacing * 2)); 6596 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6597 TmpInst.addOperand(Inst.getOperand(5)); 6598 Inst = TmpInst; 6599 return true; 6600 } 6601 6602 // VST4 multiple 4-element structure instructions. 6603 case ARM::VST4dAsm_8: 6604 case ARM::VST4dAsm_16: 6605 case ARM::VST4dAsm_32: 6606 case ARM::VST4qAsm_8: 6607 case ARM::VST4qAsm_16: 6608 case ARM::VST4qAsm_32: { 6609 MCInst TmpInst; 6610 unsigned Spacing; 6611 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6612 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6613 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6614 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6615 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6616 Spacing)); 6617 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6618 Spacing * 2)); 6619 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6620 Spacing * 3)); 6621 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6622 TmpInst.addOperand(Inst.getOperand(4)); 6623 Inst = TmpInst; 6624 return true; 6625 } 6626 6627 case ARM::VST4dWB_fixed_Asm_8: 6628 case ARM::VST4dWB_fixed_Asm_16: 6629 case ARM::VST4dWB_fixed_Asm_32: 6630 case ARM::VST4qWB_fixed_Asm_8: 6631 case ARM::VST4qWB_fixed_Asm_16: 6632 case ARM::VST4qWB_fixed_Asm_32: { 6633 MCInst TmpInst; 6634 unsigned Spacing; 6635 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6636
TmpInst.addOperand(Inst.getOperand(1)); // Rn 6637 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6638 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6639 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6640 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6641 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6642 Spacing)); 6643 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6644 Spacing * 2)); 6645 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6646 Spacing * 3)); 6647 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6648 TmpInst.addOperand(Inst.getOperand(4)); 6649 Inst = TmpInst; 6650 return true; 6651 } 6652 6653 case ARM::VST4dWB_register_Asm_8: 6654 case ARM::VST4dWB_register_Asm_16: 6655 case ARM::VST4dWB_register_Asm_32: 6656 case ARM::VST4qWB_register_Asm_8: 6657 case ARM::VST4qWB_register_Asm_16: 6658 case ARM::VST4qWB_register_Asm_32: { 6659 MCInst TmpInst; 6660 unsigned Spacing; 6661 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6662 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6663 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6664 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6665 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6666 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6667 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6668 Spacing)); 6669 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6670 Spacing * 2)); 6671 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6672 Spacing * 3)); 6673 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6674 TmpInst.addOperand(Inst.getOperand(5)); 6675 Inst = TmpInst; 6676 return true; 6677 } 6678 6679 // Handle encoding choice for the shift-immediate instructions. 
6680 case ARM::t2LSLri: 6681 case ARM::t2LSRri: 6682 case ARM::t2ASRri: { 6683 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6684 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 6685 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) && 6686 !(static_cast<ARMOperand*>(Operands[3])->isToken() && 6687 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) { 6688 unsigned NewOpc; 6689 switch (Inst.getOpcode()) { 6690 default: llvm_unreachable("unexpected opcode"); 6691 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break; 6692 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break; 6693 case ARM::t2ASRri: NewOpc = ARM::tASRri; break; 6694 } 6695 // The Thumb1 operands aren't in the same order. Awesome, eh? 6696 MCInst TmpInst; 6697 TmpInst.setOpcode(NewOpc); 6698 TmpInst.addOperand(Inst.getOperand(0)); 6699 TmpInst.addOperand(Inst.getOperand(5)); 6700 TmpInst.addOperand(Inst.getOperand(1)); 6701 TmpInst.addOperand(Inst.getOperand(2)); 6702 TmpInst.addOperand(Inst.getOperand(3)); 6703 TmpInst.addOperand(Inst.getOperand(4)); 6704 Inst = TmpInst; 6705 return true; 6706 } 6707 return false; 6708 } 6709 6710 // Handle the Thumb2 mode MOV complex aliases. 6711 case ARM::t2MOVsr: 6712 case ARM::t2MOVSsr: { 6713 // Which instruction to expand to depends on the CCOut operand and 6714 // whether we're in an IT block if the register operands are low 6715 // registers. 6716 bool isNarrow = false; 6717 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6718 isARMLowRegister(Inst.getOperand(1).getReg()) && 6719 isARMLowRegister(Inst.getOperand(2).getReg()) && 6720 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 6721 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr)) 6722 isNarrow = true; 6723 MCInst TmpInst; 6724 unsigned newOpc; 6725 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) { 6726 default: llvm_unreachable("unexpected opcode!"); 6727 case ARM_AM::asr: newOpc = isNarrow ? 
ARM::tASRrr : ARM::t2ASRrr; break; 6728 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break; 6729 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break; 6730 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break; 6731 } 6732 TmpInst.setOpcode(newOpc); 6733 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6734 if (isNarrow) 6735 TmpInst.addOperand(MCOperand::CreateReg( 6736 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 6737 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6738 TmpInst.addOperand(Inst.getOperand(2)); // Rm 6739 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6740 TmpInst.addOperand(Inst.getOperand(5)); 6741 if (!isNarrow) 6742 TmpInst.addOperand(MCOperand::CreateReg( 6743 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 6744 Inst = TmpInst; 6745 return true; 6746 } 6747 case ARM::t2MOVsi: 6748 case ARM::t2MOVSsi: { 6749 // Which instruction to expand to depends on the CCOut operand and 6750 // whether we're in an IT block if the register operands are low 6751 // registers. 6752 bool isNarrow = false; 6753 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6754 isARMLowRegister(Inst.getOperand(1).getReg()) && 6755 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi)) 6756 isNarrow = true; 6757 MCInst TmpInst; 6758 unsigned newOpc; 6759 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) { 6760 default: llvm_unreachable("unexpected opcode!"); 6761 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; 6762 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; 6763 case ARM_AM::lsl: newOpc = isNarrow ? 
ARM::tLSLri : ARM::t2LSLri; break; 6764 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; 6765 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break; 6766 } 6767 unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); 6768 if (Ammount == 32) Ammount = 0; 6769 TmpInst.setOpcode(newOpc); 6770 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6771 if (isNarrow) 6772 TmpInst.addOperand(MCOperand::CreateReg( 6773 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 6774 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6775 if (newOpc != ARM::t2RRX) 6776 TmpInst.addOperand(MCOperand::CreateImm(Ammount)); 6777 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6778 TmpInst.addOperand(Inst.getOperand(4)); 6779 if (!isNarrow) 6780 TmpInst.addOperand(MCOperand::CreateReg( 6781 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 6782 Inst = TmpInst; 6783 return true; 6784 } 6785 // Handle the ARM mode MOV complex aliases. 6786 case ARM::ASRr: 6787 case ARM::LSRr: 6788 case ARM::LSLr: 6789 case ARM::RORr: { 6790 ARM_AM::ShiftOpc ShiftTy; 6791 switch(Inst.getOpcode()) { 6792 default: llvm_unreachable("unexpected opcode!"); 6793 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 6794 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 6795 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 6796 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 6797 } 6798 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 6799 MCInst TmpInst; 6800 TmpInst.setOpcode(ARM::MOVsr); 6801 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6802 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6803 TmpInst.addOperand(Inst.getOperand(2)); // Rm 6804 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 6805 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6806 TmpInst.addOperand(Inst.getOperand(4)); 6807 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 6808 Inst = TmpInst; 6809 return true; 6810 } 6811 case ARM::ASRi: 6812 case ARM::LSRi: 6813 case ARM::LSLi: 6814 case 
ARM::RORi: { 6815 ARM_AM::ShiftOpc ShiftTy; 6816 switch(Inst.getOpcode()) { 6817 default: llvm_unreachable("unexpected opcode!"); 6818 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 6819 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 6820 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 6821 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 6822 } 6823 // A shift by zero is a plain MOVr, not a MOVsi. 6824 unsigned Amt = Inst.getOperand(2).getImm(); 6825 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi; 6826 // A shift by 32 should be encoded as 0 when permitted 6827 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr)) 6828 Amt = 0; 6829 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 6830 MCInst TmpInst; 6831 TmpInst.setOpcode(Opc); 6832 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6833 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6834 if (Opc == ARM::MOVsi) 6835 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 6836 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6837 TmpInst.addOperand(Inst.getOperand(4)); 6838 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 6839 Inst = TmpInst; 6840 return true; 6841 } 6842 case ARM::RRXi: { 6843 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 6844 MCInst TmpInst; 6845 TmpInst.setOpcode(ARM::MOVsi); 6846 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6847 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6848 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 6849 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 6850 TmpInst.addOperand(Inst.getOperand(3)); 6851 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 6852 Inst = TmpInst; 6853 return true; 6854 } 6855 case ARM::t2LDMIA_UPD: { 6856 // If this is a load of a single register, then we should use 6857 // a post-indexed LDR instruction instead, per the ARM ARM. 
6858 if (Inst.getNumOperands() != 5) 6859 return false; 6860 MCInst TmpInst; 6861 TmpInst.setOpcode(ARM::t2LDR_POST); 6862 TmpInst.addOperand(Inst.getOperand(4)); // Rt 6863 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 6864 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6865 TmpInst.addOperand(MCOperand::CreateImm(4)); 6866 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 6867 TmpInst.addOperand(Inst.getOperand(3)); 6868 Inst = TmpInst; 6869 return true; 6870 } 6871 case ARM::t2STMDB_UPD: { 6872 // If this is a store of a single register, then we should use 6873 // a pre-indexed STR instruction instead, per the ARM ARM. 6874 if (Inst.getNumOperands() != 5) 6875 return false; 6876 MCInst TmpInst; 6877 TmpInst.setOpcode(ARM::t2STR_PRE); 6878 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 6879 TmpInst.addOperand(Inst.getOperand(4)); // Rt 6880 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6881 TmpInst.addOperand(MCOperand::CreateImm(-4)); 6882 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 6883 TmpInst.addOperand(Inst.getOperand(3)); 6884 Inst = TmpInst; 6885 return true; 6886 } 6887 case ARM::LDMIA_UPD: 6888 // If this is a load of a single register via a 'pop', then we should use 6889 // a post-indexed LDR instruction instead, per the ARM ARM. 
6890 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 6891 Inst.getNumOperands() == 5) { 6892 MCInst TmpInst; 6893 TmpInst.setOpcode(ARM::LDR_POST_IMM); 6894 TmpInst.addOperand(Inst.getOperand(4)); // Rt 6895 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 6896 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6897 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 6898 TmpInst.addOperand(MCOperand::CreateImm(4)); 6899 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 6900 TmpInst.addOperand(Inst.getOperand(3)); 6901 Inst = TmpInst; 6902 return true; 6903 } 6904 break; 6905 case ARM::STMDB_UPD: 6906 // If this is a store of a single register via a 'push', then we should use 6907 // a pre-indexed STR instruction instead, per the ARM ARM. 6908 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 6909 Inst.getNumOperands() == 5) { 6910 MCInst TmpInst; 6911 TmpInst.setOpcode(ARM::STR_PRE_IMM); 6912 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 6913 TmpInst.addOperand(Inst.getOperand(4)); // Rt 6914 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 6915 TmpInst.addOperand(MCOperand::CreateImm(-4)); 6916 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 6917 TmpInst.addOperand(Inst.getOperand(3)); 6918 Inst = TmpInst; 6919 } 6920 break; 6921 case ARM::t2ADDri12: 6922 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" 6923 // mnemonic was used (not "addw"), encoding T3 is preferred. 6924 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" || 6925 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 6926 break; 6927 Inst.setOpcode(ARM::t2ADDri); 6928 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 6929 break; 6930 case ARM::t2SUBri12: 6931 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" 6932 // mnemonic was used (not "subw"), encoding T3 is preferred. 
6933 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" || 6934 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 6935 break; 6936 Inst.setOpcode(ARM::t2SUBri); 6937 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 6938 break; 6939 case ARM::tADDi8: 6940 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 6941 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 6942 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 6943 // to encoding T1 if <Rd> is omitted." 6944 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 6945 Inst.setOpcode(ARM::tADDi3); 6946 return true; 6947 } 6948 break; 6949 case ARM::tSUBi8: 6950 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 6951 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 6952 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 6953 // to encoding T1 if <Rd> is omitted." 6954 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 6955 Inst.setOpcode(ARM::tSUBi3); 6956 return true; 6957 } 6958 break; 6959 case ARM::t2ADDri: 6960 case ARM::t2SUBri: { 6961 // If the destination and first source operand are the same, and 6962 // the flags are compatible with the current IT status, use encoding T2 6963 // instead of T3. For compatibility with the system 'as'. Make sure the 6964 // wide encoding wasn't explicit. 6965 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 6966 !isARMLowRegister(Inst.getOperand(0).getReg()) || 6967 (unsigned)Inst.getOperand(2).getImm() > 255 || 6968 ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) || 6969 (inITBlock() && Inst.getOperand(5).getReg() != 0)) || 6970 (static_cast<ARMOperand*>(Operands[3])->isToken() && 6971 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) 6972 break; 6973 MCInst TmpInst; 6974 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ? 
6975 ARM::tADDi8 : ARM::tSUBi8); 6976 TmpInst.addOperand(Inst.getOperand(0)); 6977 TmpInst.addOperand(Inst.getOperand(5)); 6978 TmpInst.addOperand(Inst.getOperand(0)); 6979 TmpInst.addOperand(Inst.getOperand(2)); 6980 TmpInst.addOperand(Inst.getOperand(3)); 6981 TmpInst.addOperand(Inst.getOperand(4)); 6982 Inst = TmpInst; 6983 return true; 6984 } 6985 case ARM::t2ADDrr: { 6986 // If the destination and first source operand are the same, and 6987 // there's no setting of the flags, use encoding T2 instead of T3. 6988 // Note that this is only for ADD, not SUB. This mirrors the system 6989 // 'as' behaviour. Make sure the wide encoding wasn't explicit. 6990 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 6991 Inst.getOperand(5).getReg() != 0 || 6992 (static_cast<ARMOperand*>(Operands[3])->isToken() && 6993 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) 6994 break; 6995 MCInst TmpInst; 6996 TmpInst.setOpcode(ARM::tADDhirr); 6997 TmpInst.addOperand(Inst.getOperand(0)); 6998 TmpInst.addOperand(Inst.getOperand(0)); 6999 TmpInst.addOperand(Inst.getOperand(2)); 7000 TmpInst.addOperand(Inst.getOperand(3)); 7001 TmpInst.addOperand(Inst.getOperand(4)); 7002 Inst = TmpInst; 7003 return true; 7004 } 7005 case ARM::tADDrSP: { 7006 // If the non-SP source operand and the destination operand are not the 7007 // same, we need to use the 32-bit encoding if it's available. 7008 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { 7009 Inst.setOpcode(ARM::t2ADDrr); 7010 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 7011 return true; 7012 } 7013 break; 7014 } 7015 case ARM::tB: 7016 // A Thumb conditional branch outside of an IT block is a tBcc. 7017 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 7018 Inst.setOpcode(ARM::tBcc); 7019 return true; 7020 } 7021 break; 7022 case ARM::t2B: 7023 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 
7024 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 7025 Inst.setOpcode(ARM::t2Bcc); 7026 return true; 7027 } 7028 break; 7029 case ARM::t2Bcc: 7030 // If the conditional is AL or we're in an IT block, we really want t2B. 7031 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 7032 Inst.setOpcode(ARM::t2B); 7033 return true; 7034 } 7035 break; 7036 case ARM::tBcc: 7037 // If the conditional is AL, we really want tB. 7038 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 7039 Inst.setOpcode(ARM::tB); 7040 return true; 7041 } 7042 break; 7043 case ARM::tLDMIA: { 7044 // If the register list contains any high registers, or if the writeback 7045 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 7046 // instead if we're in Thumb2. Otherwise, this should have generated 7047 // an error in validateInstruction(). 7048 unsigned Rn = Inst.getOperand(0).getReg(); 7049 bool hasWritebackToken = 7050 (static_cast<ARMOperand*>(Operands[3])->isToken() && 7051 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 7052 bool listContainsBase; 7053 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 7054 (!listContainsBase && !hasWritebackToken) || 7055 (listContainsBase && hasWritebackToken)) { 7056 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 7057 assert (isThumbTwo()); 7058 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 7059 // If we're switching to the updating version, we need to insert 7060 // the writeback tied operand. 7061 if (hasWritebackToken) 7062 Inst.insert(Inst.begin(), 7063 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 7064 return true; 7065 } 7066 break; 7067 } 7068 case ARM::tSTMIA_UPD: { 7069 // If the register list contains any high registers, we need to use 7070 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 7071 // should have generated an error in validateInstruction(). 
7072 unsigned Rn = Inst.getOperand(0).getReg(); 7073 bool listContainsBase; 7074 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 7075 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 7076 assert (isThumbTwo()); 7077 Inst.setOpcode(ARM::t2STMIA_UPD); 7078 return true; 7079 } 7080 break; 7081 } 7082 case ARM::tPOP: { 7083 bool listContainsBase; 7084 // If the register list contains any high registers, we need to use 7085 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 7086 // should have generated an error in validateInstruction(). 7087 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 7088 return false; 7089 assert (isThumbTwo()); 7090 Inst.setOpcode(ARM::t2LDMIA_UPD); 7091 // Add the base register and writeback operands. 7092 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7093 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7094 return true; 7095 } 7096 case ARM::tPUSH: { 7097 bool listContainsBase; 7098 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 7099 return false; 7100 assert (isThumbTwo()); 7101 Inst.setOpcode(ARM::t2STMDB_UPD); 7102 // Add the base register and writeback operands. 7103 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7104 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7105 return true; 7106 } 7107 case ARM::t2MOVi: { 7108 // If we can use the 16-bit encoding and the user didn't explicitly 7109 // request the 32-bit variant, transform it here. 7110 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 7111 (unsigned)Inst.getOperand(1).getImm() <= 255 && 7112 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 7113 Inst.getOperand(4).getReg() == ARM::CPSR) || 7114 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 7115 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 7116 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 7117 // The operands aren't in the same order for tMOVi8... 
7118 MCInst TmpInst; 7119 TmpInst.setOpcode(ARM::tMOVi8); 7120 TmpInst.addOperand(Inst.getOperand(0)); 7121 TmpInst.addOperand(Inst.getOperand(4)); 7122 TmpInst.addOperand(Inst.getOperand(1)); 7123 TmpInst.addOperand(Inst.getOperand(2)); 7124 TmpInst.addOperand(Inst.getOperand(3)); 7125 Inst = TmpInst; 7126 return true; 7127 } 7128 break; 7129 } 7130 case ARM::t2MOVr: { 7131 // If we can use the 16-bit encoding and the user didn't explicitly 7132 // request the 32-bit variant, transform it here. 7133 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 7134 isARMLowRegister(Inst.getOperand(1).getReg()) && 7135 Inst.getOperand(2).getImm() == ARMCC::AL && 7136 Inst.getOperand(4).getReg() == ARM::CPSR && 7137 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 7138 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 7139 // The operands aren't the same for tMOV[S]r... (no cc_out) 7140 MCInst TmpInst; 7141 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 7142 TmpInst.addOperand(Inst.getOperand(0)); 7143 TmpInst.addOperand(Inst.getOperand(1)); 7144 TmpInst.addOperand(Inst.getOperand(2)); 7145 TmpInst.addOperand(Inst.getOperand(3)); 7146 Inst = TmpInst; 7147 return true; 7148 } 7149 break; 7150 } 7151 case ARM::t2SXTH: 7152 case ARM::t2SXTB: 7153 case ARM::t2UXTH: 7154 case ARM::t2UXTB: { 7155 // If we can use the 16-bit encoding and the user didn't explicitly 7156 // request the 32-bit variant, transform it here. 
7157 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 7158 isARMLowRegister(Inst.getOperand(1).getReg()) && 7159 Inst.getOperand(2).getImm() == 0 && 7160 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 7161 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 7162 unsigned NewOpc; 7163 switch (Inst.getOpcode()) { 7164 default: llvm_unreachable("Illegal opcode!"); 7165 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 7166 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 7167 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 7168 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 7169 } 7170 // The operands aren't the same for thumb1 (no rotate operand). 7171 MCInst TmpInst; 7172 TmpInst.setOpcode(NewOpc); 7173 TmpInst.addOperand(Inst.getOperand(0)); 7174 TmpInst.addOperand(Inst.getOperand(1)); 7175 TmpInst.addOperand(Inst.getOperand(3)); 7176 TmpInst.addOperand(Inst.getOperand(4)); 7177 Inst = TmpInst; 7178 return true; 7179 } 7180 break; 7181 } 7182 case ARM::MOVsi: { 7183 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); 7184 // rrx shifts and asr/lsr of #32 is encoded as 0 7185 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr) 7186 return false; 7187 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) { 7188 // Shifting by zero is accepted as a vanilla 'MOVr' 7189 MCInst TmpInst; 7190 TmpInst.setOpcode(ARM::MOVr); 7191 TmpInst.addOperand(Inst.getOperand(0)); 7192 TmpInst.addOperand(Inst.getOperand(1)); 7193 TmpInst.addOperand(Inst.getOperand(3)); 7194 TmpInst.addOperand(Inst.getOperand(4)); 7195 TmpInst.addOperand(Inst.getOperand(5)); 7196 Inst = TmpInst; 7197 return true; 7198 } 7199 return false; 7200 } 7201 case ARM::ANDrsi: 7202 case ARM::ORRrsi: 7203 case ARM::EORrsi: 7204 case ARM::BICrsi: 7205 case ARM::SUBrsi: 7206 case ARM::ADDrsi: { 7207 unsigned newOpc; 7208 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm()); 7209 if (SOpc == ARM_AM::rrx) return false; 7210 switch 
(Inst.getOpcode()) { 7211 default: llvm_unreachable("unexpected opcode!"); 7212 case ARM::ANDrsi: newOpc = ARM::ANDrr; break; 7213 case ARM::ORRrsi: newOpc = ARM::ORRrr; break; 7214 case ARM::EORrsi: newOpc = ARM::EORrr; break; 7215 case ARM::BICrsi: newOpc = ARM::BICrr; break; 7216 case ARM::SUBrsi: newOpc = ARM::SUBrr; break; 7217 case ARM::ADDrsi: newOpc = ARM::ADDrr; break; 7218 } 7219 // If the shift is by zero, use the non-shifted instruction definition. 7220 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) { 7221 MCInst TmpInst; 7222 TmpInst.setOpcode(newOpc); 7223 TmpInst.addOperand(Inst.getOperand(0)); 7224 TmpInst.addOperand(Inst.getOperand(1)); 7225 TmpInst.addOperand(Inst.getOperand(2)); 7226 TmpInst.addOperand(Inst.getOperand(4)); 7227 TmpInst.addOperand(Inst.getOperand(5)); 7228 TmpInst.addOperand(Inst.getOperand(6)); 7229 Inst = TmpInst; 7230 return true; 7231 } 7232 return false; 7233 } 7234 case ARM::ITasm: 7235 case ARM::t2IT: { 7236 // The mask bits for all but the first condition are represented as 7237 // the low bit of the condition code value implies 't'. We currently 7238 // always have 1 implies 't', so XOR toggle the bits if the low bit 7239 // of the condition code is zero. 7240 MCOperand &MO = Inst.getOperand(1); 7241 unsigned Mask = MO.getImm(); 7242 unsigned OrigMask = Mask; 7243 unsigned TZ = CountTrailingZeros_32(Mask); 7244 if ((Inst.getOperand(0).getImm() & 1) == 0) { 7245 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 7246 for (unsigned i = 3; i != TZ; --i) 7247 Mask ^= 1 << i; 7248 } 7249 MO.setImm(Mask); 7250 7251 // Set up the IT block state according to the IT instruction we just 7252 // matched. 7253 assert(!inITBlock() && "nested IT blocks?!"); 7254 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 7255 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 
7256 ITState.CurPosition = 0; 7257 ITState.FirstCond = true; 7258 break; 7259 } 7260 } 7261 return false; 7262} 7263 7264unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 7265 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 7266 // suffix depending on whether they're in an IT block or not. 7267 unsigned Opc = Inst.getOpcode(); 7268 const MCInstrDesc &MCID = getInstDesc(Opc); 7269 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 7270 assert(MCID.hasOptionalDef() && 7271 "optionally flag setting instruction missing optional def operand"); 7272 assert(MCID.NumOperands == Inst.getNumOperands() && 7273 "operand count mismatch!"); 7274 // Find the optional-def operand (cc_out). 7275 unsigned OpNo; 7276 for (OpNo = 0; 7277 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 7278 ++OpNo) 7279 ; 7280 // If we're parsing Thumb1, reject it completely. 7281 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 7282 return Match_MnemonicFail; 7283 // If we're parsing Thumb2, which form is legal depends on whether we're 7284 // in an IT block. 7285 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 7286 !inITBlock()) 7287 return Match_RequiresITBlock; 7288 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 7289 inITBlock()) 7290 return Match_RequiresNotITBlock; 7291 } 7292 // Some high-register supporting Thumb1 encodings only allow both registers 7293 // to be from r0-r7 when in Thumb2. 7294 else if (Opc == ARM::tADDhirr && isThumbOne() && 7295 isARMLowRegister(Inst.getOperand(1).getReg()) && 7296 isARMLowRegister(Inst.getOperand(2).getReg())) 7297 return Match_RequiresThumb2; 7298 // Others only require ARMv6 or later. 
7299 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 7300 isARMLowRegister(Inst.getOperand(0).getReg()) && 7301 isARMLowRegister(Inst.getOperand(1).getReg())) 7302 return Match_RequiresV6; 7303 return Match_Success; 7304} 7305 7306static const char *getSubtargetFeatureName(unsigned Val); 7307bool ARMAsmParser:: 7308MatchAndEmitInstruction(SMLoc IDLoc, 7309 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 7310 MCStreamer &Out) { 7311 MCInst Inst; 7312 unsigned ErrorInfo; 7313 unsigned MatchResult; 7314 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 7315 switch (MatchResult) { 7316 default: break; 7317 case Match_Success: 7318 // Context sensitive operand constraints aren't handled by the matcher, 7319 // so check them here. 7320 if (validateInstruction(Inst, Operands)) { 7321 // Still progress the IT block, otherwise one wrong condition causes 7322 // nasty cascading errors. 7323 forwardITPosition(); 7324 return true; 7325 } 7326 7327 // Some instructions need post-processing to, for example, tweak which 7328 // encoding is selected. Loop on it while changes happen so the 7329 // individual transformations can chain off each other. E.g., 7330 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 7331 while (processInstruction(Inst, Operands)) 7332 ; 7333 7334 // Only move forward at the very end so that everything in validate 7335 // and process gets a consistent answer about whether we're in an IT 7336 // block. 7337 forwardITPosition(); 7338 7339 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and 7340 // doesn't actually encode. 7341 if (Inst.getOpcode() == ARM::ITasm) 7342 return false; 7343 7344 Inst.setLoc(IDLoc); 7345 Out.EmitInstruction(Inst); 7346 return false; 7347 case Match_MissingFeature: { 7348 assert(ErrorInfo && "Unknown missing feature!"); 7349 // Special case the error message for the very common case where only 7350 // a single subtarget feature is missing (Thumb vs. ARM, e.g.). 
7351 std::string Msg = "instruction requires:"; 7352 unsigned Mask = 1; 7353 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) { 7354 if (ErrorInfo & Mask) { 7355 Msg += " "; 7356 Msg += getSubtargetFeatureName(ErrorInfo & Mask); 7357 } 7358 Mask <<= 1; 7359 } 7360 return Error(IDLoc, Msg); 7361 } 7362 case Match_InvalidOperand: { 7363 SMLoc ErrorLoc = IDLoc; 7364 if (ErrorInfo != ~0U) { 7365 if (ErrorInfo >= Operands.size()) 7366 return Error(IDLoc, "too few operands for instruction"); 7367 7368 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 7369 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 7370 } 7371 7372 return Error(ErrorLoc, "invalid operand for instruction"); 7373 } 7374 case Match_MnemonicFail: 7375 return Error(IDLoc, "invalid instruction", 7376 ((ARMOperand*)Operands[0])->getLocRange()); 7377 case Match_ConversionFail: 7378 // The converter function will have already emited a diagnostic. 7379 return true; 7380 case Match_RequiresNotITBlock: 7381 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 7382 case Match_RequiresITBlock: 7383 return Error(IDLoc, "instruction only valid inside IT block"); 7384 case Match_RequiresV6: 7385 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 7386 case Match_RequiresThumb2: 7387 return Error(IDLoc, "instruction variant requires Thumb2"); 7388 } 7389 7390 llvm_unreachable("Implement any new match types added!"); 7391} 7392 7393/// parseDirective parses the arm specific directives 7394bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 7395 StringRef IDVal = DirectiveID.getIdentifier(); 7396 if (IDVal == ".word") 7397 return parseDirectiveWord(4, DirectiveID.getLoc()); 7398 else if (IDVal == ".thumb") 7399 return parseDirectiveThumb(DirectiveID.getLoc()); 7400 else if (IDVal == ".arm") 7401 return parseDirectiveARM(DirectiveID.getLoc()); 7402 else if (IDVal == ".thumb_func") 7403 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 7404 else if (IDVal == 
".code") 7405 return parseDirectiveCode(DirectiveID.getLoc()); 7406 else if (IDVal == ".syntax") 7407 return parseDirectiveSyntax(DirectiveID.getLoc()); 7408 else if (IDVal == ".unreq") 7409 return parseDirectiveUnreq(DirectiveID.getLoc()); 7410 else if (IDVal == ".arch") 7411 return parseDirectiveArch(DirectiveID.getLoc()); 7412 else if (IDVal == ".eabi_attribute") 7413 return parseDirectiveEabiAttr(DirectiveID.getLoc()); 7414 return true; 7415} 7416 7417/// parseDirectiveWord 7418/// ::= .word [ expression (, expression)* ] 7419bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 7420 if (getLexer().isNot(AsmToken::EndOfStatement)) { 7421 for (;;) { 7422 const MCExpr *Value; 7423 if (getParser().ParseExpression(Value)) 7424 return true; 7425 7426 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 7427 7428 if (getLexer().is(AsmToken::EndOfStatement)) 7429 break; 7430 7431 // FIXME: Improve diagnostic. 7432 if (getLexer().isNot(AsmToken::Comma)) 7433 return Error(L, "unexpected token in directive"); 7434 Parser.Lex(); 7435 } 7436 } 7437 7438 Parser.Lex(); 7439 return false; 7440} 7441 7442/// parseDirectiveThumb 7443/// ::= .thumb 7444bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 7445 if (getLexer().isNot(AsmToken::EndOfStatement)) 7446 return Error(L, "unexpected token in directive"); 7447 Parser.Lex(); 7448 7449 if (!isThumb()) 7450 SwitchMode(); 7451 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 7452 return false; 7453} 7454 7455/// parseDirectiveARM 7456/// ::= .arm 7457bool ARMAsmParser::parseDirectiveARM(SMLoc L) { 7458 if (getLexer().isNot(AsmToken::EndOfStatement)) 7459 return Error(L, "unexpected token in directive"); 7460 Parser.Lex(); 7461 7462 if (isThumb()) 7463 SwitchMode(); 7464 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 7465 return false; 7466} 7467 7468/// parseDirectiveThumbFunc 7469/// ::= .thumbfunc symbol_name 7470bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 7471 const MCAsmInfo 
&MAI = getParser().getStreamer().getContext().getAsmInfo(); 7472 bool isMachO = MAI.hasSubsectionsViaSymbols(); 7473 StringRef Name; 7474 bool needFuncName = true; 7475 7476 // Darwin asm has (optionally) function name after .thumb_func direction 7477 // ELF doesn't 7478 if (isMachO) { 7479 const AsmToken &Tok = Parser.getTok(); 7480 if (Tok.isNot(AsmToken::EndOfStatement)) { 7481 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 7482 return Error(L, "unexpected token in .thumb_func directive"); 7483 Name = Tok.getIdentifier(); 7484 Parser.Lex(); // Consume the identifier token. 7485 needFuncName = false; 7486 } 7487 } 7488 7489 if (getLexer().isNot(AsmToken::EndOfStatement)) 7490 return Error(L, "unexpected token in directive"); 7491 7492 // Eat the end of statement and any blank lines that follow. 7493 while (getLexer().is(AsmToken::EndOfStatement)) 7494 Parser.Lex(); 7495 7496 // FIXME: assuming function name will be the line following .thumb_func 7497 // We really should be checking the next symbol definition even if there's 7498 // stuff in between. 7499 if (needFuncName) { 7500 Name = Parser.getTok().getIdentifier(); 7501 } 7502 7503 // Mark symbol as a thumb symbol. 
7504 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 7505 getParser().getStreamer().EmitThumbFunc(Func); 7506 return false; 7507} 7508 7509/// parseDirectiveSyntax 7510/// ::= .syntax unified | divided 7511bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 7512 const AsmToken &Tok = Parser.getTok(); 7513 if (Tok.isNot(AsmToken::Identifier)) 7514 return Error(L, "unexpected token in .syntax directive"); 7515 StringRef Mode = Tok.getString(); 7516 if (Mode == "unified" || Mode == "UNIFIED") 7517 Parser.Lex(); 7518 else if (Mode == "divided" || Mode == "DIVIDED") 7519 return Error(L, "'.syntax divided' arm asssembly not supported"); 7520 else 7521 return Error(L, "unrecognized syntax mode in .syntax directive"); 7522 7523 if (getLexer().isNot(AsmToken::EndOfStatement)) 7524 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 7525 Parser.Lex(); 7526 7527 // TODO tell the MC streamer the mode 7528 // getParser().getStreamer().Emit???(); 7529 return false; 7530} 7531 7532/// parseDirectiveCode 7533/// ::= .code 16 | 32 7534bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 7535 const AsmToken &Tok = Parser.getTok(); 7536 if (Tok.isNot(AsmToken::Integer)) 7537 return Error(L, "unexpected token in .code directive"); 7538 int64_t Val = Parser.getTok().getIntVal(); 7539 if (Val == 16) 7540 Parser.Lex(); 7541 else if (Val == 32) 7542 Parser.Lex(); 7543 else 7544 return Error(L, "invalid operand to .code directive"); 7545 7546 if (getLexer().isNot(AsmToken::EndOfStatement)) 7547 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 7548 Parser.Lex(); 7549 7550 if (Val == 16) { 7551 if (!isThumb()) 7552 SwitchMode(); 7553 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 7554 } else { 7555 if (isThumb()) 7556 SwitchMode(); 7557 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 7558 } 7559 7560 return false; 7561} 7562 7563/// parseDirectiveReq 7564/// ::= name .req registername 7565bool 
ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { 7566 Parser.Lex(); // Eat the '.req' token. 7567 unsigned Reg; 7568 SMLoc SRegLoc, ERegLoc; 7569 if (ParseRegister(Reg, SRegLoc, ERegLoc)) { 7570 Parser.EatToEndOfStatement(); 7571 return Error(SRegLoc, "register name expected"); 7572 } 7573 7574 // Shouldn't be anything else. 7575 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { 7576 Parser.EatToEndOfStatement(); 7577 return Error(Parser.getTok().getLoc(), 7578 "unexpected input in .req directive."); 7579 } 7580 7581 Parser.Lex(); // Consume the EndOfStatement 7582 7583 if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) 7584 return Error(SRegLoc, "redefinition of '" + Name + 7585 "' does not match original."); 7586 7587 return false; 7588} 7589 7590/// parseDirectiveUneq 7591/// ::= .unreq registername 7592bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { 7593 if (Parser.getTok().isNot(AsmToken::Identifier)) { 7594 Parser.EatToEndOfStatement(); 7595 return Error(L, "unexpected input in .unreq directive."); 7596 } 7597 RegisterReqs.erase(Parser.getTok().getIdentifier()); 7598 Parser.Lex(); // Eat the identifier. 7599 return false; 7600} 7601 7602/// parseDirectiveArch 7603/// ::= .arch token 7604bool ARMAsmParser::parseDirectiveArch(SMLoc L) { 7605 return true; 7606} 7607 7608/// parseDirectiveEabiAttr 7609/// ::= .eabi_attribute int, int 7610bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) { 7611 return true; 7612} 7613 7614extern "C" void LLVMInitializeARMAsmLexer(); 7615 7616/// Force static initialization. 7617extern "C" void LLVMInitializeARMAsmParser() { 7618 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 7619 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 7620 LLVMInitializeARMAsmLexer(); 7621} 7622 7623#define GET_REGISTER_MATCHER 7624#define GET_SUBTARGET_FEATURE_NAME 7625#define GET_MATCHER_IMPLEMENTATION 7626#include "ARMGenAsmMatcher.inc" 7627