ARMAsmParser.cpp revision 3bc8a3d3afe3ddda884a681002e24850099b719e
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

// Kinds of lane specification on a NEON vector register list: no lane
// suffix, an all-lanes suffix ("[]"), or a specific indexed lane ("[n]").
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

/// ARMAsmParser - Target assembly parser for ARM and Thumb.  Implements the
/// MCTargetAsmParser interface (register/instruction/directive parsing) on
/// top of the generic MCAsmParser, and tracks Thumb IT-block state across
/// instructions.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // State of the IT (if-then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block.  It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True while an IT block is active; CurPosition == ~0U is the sentinel
  // for "no active IT block".
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Flip between ARM and Thumb mode and recompute the available-feature
  // bits accordingly.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match results, starting after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  SmallVector<unsigned, 8> Registers;

  // Per-kind payload; which member is live is determined by Kind.
  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
330 struct { 331 unsigned RegNum; 332 unsigned Count; 333 unsigned LaneIndex; 334 } VectorList; 335 336 struct { 337 unsigned Val; 338 } VectorIndex; 339 340 struct { 341 const MCExpr *Val; 342 } Imm; 343 344 struct { 345 unsigned Val; // encoded 8-bit representation 346 } FPImm; 347 348 /// Combined record for all forms of ARM address expressions. 349 struct { 350 unsigned BaseRegNum; 351 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 352 // was specified. 353 const MCConstantExpr *OffsetImm; // Offset immediate value 354 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 355 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 356 unsigned ShiftImm; // shift for OffsetReg. 357 unsigned Alignment; // 0 = no alignment specified 358 // n = alignment in bytes (8, 16, or 32) 359 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 360 } Memory; 361 362 struct { 363 unsigned RegNum; 364 bool isAdd; 365 ARM_AM::ShiftOpc ShiftTy; 366 unsigned ShiftImm; 367 } PostIdxReg; 368 369 struct { 370 bool isASR; 371 unsigned Imm; 372 } ShifterImm; 373 struct { 374 ARM_AM::ShiftOpc ShiftTy; 375 unsigned SrcReg; 376 unsigned ShiftReg; 377 unsigned ShiftImm; 378 } RegShiftedReg; 379 struct { 380 ARM_AM::ShiftOpc ShiftTy; 381 unsigned SrcReg; 382 unsigned ShiftImm; 383 } RegShiftedImm; 384 struct { 385 unsigned Imm; 386 } RotImm; 387 struct { 388 unsigned LSB; 389 unsigned Width; 390 } Bitfield; 391 }; 392 393 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 394public: 395 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 396 Kind = o.Kind; 397 StartLoc = o.StartLoc; 398 EndLoc = o.EndLoc; 399 switch (Kind) { 400 case k_CondCode: 401 CC = o.CC; 402 break; 403 case k_ITCondMask: 404 ITMask = o.ITMask; 405 break; 406 case k_Token: 407 Tok = o.Tok; 408 break; 409 case k_CCOut: 410 case k_Register: 411 Reg = o.Reg; 412 break; 413 case k_RegisterList: 414 case k_DPRRegisterList: 415 case k_SPRRegisterList: 416 Registers = 
o.Registers; 417 break; 418 case k_VectorList: 419 case k_VectorListAllLanes: 420 case k_VectorListIndexed: 421 VectorList = o.VectorList; 422 break; 423 case k_CoprocNum: 424 case k_CoprocReg: 425 Cop = o.Cop; 426 break; 427 case k_CoprocOption: 428 CoprocOption = o.CoprocOption; 429 break; 430 case k_Immediate: 431 Imm = o.Imm; 432 break; 433 case k_FPImmediate: 434 FPImm = o.FPImm; 435 break; 436 case k_MemBarrierOpt: 437 MBOpt = o.MBOpt; 438 break; 439 case k_Memory: 440 Memory = o.Memory; 441 break; 442 case k_PostIndexRegister: 443 PostIdxReg = o.PostIdxReg; 444 break; 445 case k_MSRMask: 446 MMask = o.MMask; 447 break; 448 case k_ProcIFlags: 449 IFlags = o.IFlags; 450 break; 451 case k_ShifterImmediate: 452 ShifterImm = o.ShifterImm; 453 break; 454 case k_ShiftedRegister: 455 RegShiftedReg = o.RegShiftedReg; 456 break; 457 case k_ShiftedImmediate: 458 RegShiftedImm = o.RegShiftedImm; 459 break; 460 case k_RotateImmediate: 461 RotImm = o.RotImm; 462 break; 463 case k_BitfieldDescriptor: 464 Bitfield = o.Bitfield; 465 break; 466 case k_VectorIndex: 467 VectorIndex = o.VectorIndex; 468 break; 469 } 470 } 471 472 /// getStartLoc - Get the location of the first token of this operand. 473 SMLoc getStartLoc() const { return StartLoc; } 474 /// getEndLoc - Get the location of the last token of this operand. 
475 SMLoc getEndLoc() const { return EndLoc; } 476 477 ARMCC::CondCodes getCondCode() const { 478 assert(Kind == k_CondCode && "Invalid access!"); 479 return CC.Val; 480 } 481 482 unsigned getCoproc() const { 483 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 484 return Cop.Val; 485 } 486 487 StringRef getToken() const { 488 assert(Kind == k_Token && "Invalid access!"); 489 return StringRef(Tok.Data, Tok.Length); 490 } 491 492 unsigned getReg() const { 493 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 494 return Reg.RegNum; 495 } 496 497 const SmallVectorImpl<unsigned> &getRegList() const { 498 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 499 Kind == k_SPRRegisterList) && "Invalid access!"); 500 return Registers; 501 } 502 503 const MCExpr *getImm() const { 504 assert(Kind == k_Immediate && "Invalid access!"); 505 return Imm.Val; 506 } 507 508 unsigned getFPImm() const { 509 assert(Kind == k_FPImmediate && "Invalid access!"); 510 return FPImm.Val; 511 } 512 513 unsigned getVectorIndex() const { 514 assert(Kind == k_VectorIndex && "Invalid access!"); 515 return VectorIndex.Val; 516 } 517 518 ARM_MB::MemBOpt getMemBarrierOpt() const { 519 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 520 return MBOpt.Val; 521 } 522 523 ARM_PROC::IFlags getProcIFlags() const { 524 assert(Kind == k_ProcIFlags && "Invalid access!"); 525 return IFlags.Val; 526 } 527 528 unsigned getMSRMask() const { 529 assert(Kind == k_MSRMask && "Invalid access!"); 530 return MMask.Val; 531 } 532 533 bool isCoprocNum() const { return Kind == k_CoprocNum; } 534 bool isCoprocReg() const { return Kind == k_CoprocReg; } 535 bool isCoprocOption() const { return Kind == k_CoprocOption; } 536 bool isCondCode() const { return Kind == k_CondCode; } 537 bool isCCOut() const { return Kind == k_CCOut; } 538 bool isITMask() const { return Kind == k_ITCondMask; } 539 bool isITCondCode() const { return Kind == k_CondCode; } 540 bool isImm() 
const { return Kind == k_Immediate; } 541 bool isFPImm() const { return Kind == k_FPImmediate; } 542 bool isImm8s4() const { 543 if (Kind != k_Immediate) 544 return false; 545 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 546 if (!CE) return false; 547 int64_t Value = CE->getValue(); 548 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 549 } 550 bool isImm0_1020s4() const { 551 if (Kind != k_Immediate) 552 return false; 553 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 554 if (!CE) return false; 555 int64_t Value = CE->getValue(); 556 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 557 } 558 bool isImm0_508s4() const { 559 if (Kind != k_Immediate) 560 return false; 561 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 562 if (!CE) return false; 563 int64_t Value = CE->getValue(); 564 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 565 } 566 bool isImm0_255() const { 567 if (Kind != k_Immediate) 568 return false; 569 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 570 if (!CE) return false; 571 int64_t Value = CE->getValue(); 572 return Value >= 0 && Value < 256; 573 } 574 bool isImm0_1() const { 575 if (Kind != k_Immediate) 576 return false; 577 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 578 if (!CE) return false; 579 int64_t Value = CE->getValue(); 580 return Value >= 0 && Value < 2; 581 } 582 bool isImm0_3() const { 583 if (Kind != k_Immediate) 584 return false; 585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 586 if (!CE) return false; 587 int64_t Value = CE->getValue(); 588 return Value >= 0 && Value < 4; 589 } 590 bool isImm0_7() const { 591 if (Kind != k_Immediate) 592 return false; 593 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 594 if (!CE) return false; 595 int64_t Value = CE->getValue(); 596 return Value >= 0 && Value < 8; 597 } 598 bool isImm0_15() const { 599 if (Kind != k_Immediate) 600 return false; 601 
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 602 if (!CE) return false; 603 int64_t Value = CE->getValue(); 604 return Value >= 0 && Value < 16; 605 } 606 bool isImm0_31() const { 607 if (Kind != k_Immediate) 608 return false; 609 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 610 if (!CE) return false; 611 int64_t Value = CE->getValue(); 612 return Value >= 0 && Value < 32; 613 } 614 bool isImm8() const { 615 if (Kind != k_Immediate) 616 return false; 617 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 618 if (!CE) return false; 619 int64_t Value = CE->getValue(); 620 return Value == 8; 621 } 622 bool isImm16() const { 623 if (Kind != k_Immediate) 624 return false; 625 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 626 if (!CE) return false; 627 int64_t Value = CE->getValue(); 628 return Value == 16; 629 } 630 bool isImm32() const { 631 if (Kind != k_Immediate) 632 return false; 633 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 634 if (!CE) return false; 635 int64_t Value = CE->getValue(); 636 return Value == 32; 637 } 638 bool isImm1_7() const { 639 if (Kind != k_Immediate) 640 return false; 641 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 642 if (!CE) return false; 643 int64_t Value = CE->getValue(); 644 return Value > 0 && Value < 8; 645 } 646 bool isImm1_15() const { 647 if (Kind != k_Immediate) 648 return false; 649 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 650 if (!CE) return false; 651 int64_t Value = CE->getValue(); 652 return Value > 0 && Value < 16; 653 } 654 bool isImm1_31() const { 655 if (Kind != k_Immediate) 656 return false; 657 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 658 if (!CE) return false; 659 int64_t Value = CE->getValue(); 660 return Value > 0 && Value < 32; 661 } 662 bool isImm1_16() const { 663 if (Kind != k_Immediate) 664 return false; 665 const MCConstantExpr *CE = 
dyn_cast<MCConstantExpr>(getImm()); 666 if (!CE) return false; 667 int64_t Value = CE->getValue(); 668 return Value > 0 && Value < 17; 669 } 670 bool isImm1_32() const { 671 if (Kind != k_Immediate) 672 return false; 673 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 674 if (!CE) return false; 675 int64_t Value = CE->getValue(); 676 return Value > 0 && Value < 33; 677 } 678 bool isImm0_32() const { 679 if (Kind != k_Immediate) 680 return false; 681 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 682 if (!CE) return false; 683 int64_t Value = CE->getValue(); 684 return Value >= 0 && Value < 33; 685 } 686 bool isImm0_65535() const { 687 if (Kind != k_Immediate) 688 return false; 689 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 690 if (!CE) return false; 691 int64_t Value = CE->getValue(); 692 return Value >= 0 && Value < 65536; 693 } 694 bool isImm0_65535Expr() const { 695 if (Kind != k_Immediate) 696 return false; 697 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 698 // If it's not a constant expression, it'll generate a fixup and be 699 // handled later. 
700 if (!CE) return true; 701 int64_t Value = CE->getValue(); 702 return Value >= 0 && Value < 65536; 703 } 704 bool isImm24bit() const { 705 if (Kind != k_Immediate) 706 return false; 707 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 708 if (!CE) return false; 709 int64_t Value = CE->getValue(); 710 return Value >= 0 && Value <= 0xffffff; 711 } 712 bool isImmThumbSR() const { 713 if (Kind != k_Immediate) 714 return false; 715 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 716 if (!CE) return false; 717 int64_t Value = CE->getValue(); 718 return Value > 0 && Value < 33; 719 } 720 bool isPKHLSLImm() const { 721 if (Kind != k_Immediate) 722 return false; 723 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 724 if (!CE) return false; 725 int64_t Value = CE->getValue(); 726 return Value >= 0 && Value < 32; 727 } 728 bool isPKHASRImm() const { 729 if (Kind != k_Immediate) 730 return false; 731 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 732 if (!CE) return false; 733 int64_t Value = CE->getValue(); 734 return Value > 0 && Value <= 32; 735 } 736 bool isARMSOImm() const { 737 if (Kind != k_Immediate) 738 return false; 739 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 740 if (!CE) return false; 741 int64_t Value = CE->getValue(); 742 return ARM_AM::getSOImmVal(Value) != -1; 743 } 744 bool isARMSOImmNot() const { 745 if (Kind != k_Immediate) 746 return false; 747 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 748 if (!CE) return false; 749 int64_t Value = CE->getValue(); 750 return ARM_AM::getSOImmVal(~Value) != -1; 751 } 752 bool isARMSOImmNeg() const { 753 if (Kind != k_Immediate) 754 return false; 755 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 756 if (!CE) return false; 757 int64_t Value = CE->getValue(); 758 return ARM_AM::getSOImmVal(-Value) != -1; 759 } 760 bool isT2SOImm() const { 761 if (Kind != k_Immediate) 762 return false; 763 const 
MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 764 if (!CE) return false; 765 int64_t Value = CE->getValue(); 766 return ARM_AM::getT2SOImmVal(Value) != -1; 767 } 768 bool isT2SOImmNot() const { 769 if (Kind != k_Immediate) 770 return false; 771 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 772 if (!CE) return false; 773 int64_t Value = CE->getValue(); 774 return ARM_AM::getT2SOImmVal(~Value) != -1; 775 } 776 bool isT2SOImmNeg() const { 777 if (Kind != k_Immediate) 778 return false; 779 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 780 if (!CE) return false; 781 int64_t Value = CE->getValue(); 782 return ARM_AM::getT2SOImmVal(-Value) != -1; 783 } 784 bool isSetEndImm() const { 785 if (Kind != k_Immediate) 786 return false; 787 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 788 if (!CE) return false; 789 int64_t Value = CE->getValue(); 790 return Value == 1 || Value == 0; 791 } 792 bool isReg() const { return Kind == k_Register; } 793 bool isRegList() const { return Kind == k_RegisterList; } 794 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 795 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 796 bool isToken() const { return Kind == k_Token; } 797 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 798 bool isMemory() const { return Kind == k_Memory; } 799 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 800 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 801 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 802 bool isRotImm() const { return Kind == k_RotateImmediate; } 803 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 804 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 805 bool isPostIdxReg() const { 806 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 807 } 808 bool isMemNoOffset(bool alignOK = false) const { 809 if (!isMemory()) 810 
return false; 811 // No offset of any kind. 812 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 813 (alignOK || Memory.Alignment == 0); 814 } 815 bool isAlignedMemory() const { 816 return isMemNoOffset(true); 817 } 818 bool isAddrMode2() const { 819 if (!isMemory() || Memory.Alignment != 0) return false; 820 // Check for register offset. 821 if (Memory.OffsetRegNum) return true; 822 // Immediate offset in range [-4095, 4095]. 823 if (!Memory.OffsetImm) return true; 824 int64_t Val = Memory.OffsetImm->getValue(); 825 return Val > -4096 && Val < 4096; 826 } 827 bool isAM2OffsetImm() const { 828 if (Kind != k_Immediate) 829 return false; 830 // Immediate offset in range [-4095, 4095]. 831 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 832 if (!CE) return false; 833 int64_t Val = CE->getValue(); 834 return Val > -4096 && Val < 4096; 835 } 836 bool isAddrMode3() const { 837 if (!isMemory() || Memory.Alignment != 0) return false; 838 // No shifts are legal for AM3. 839 if (Memory.ShiftType != ARM_AM::no_shift) return false; 840 // Check for register offset. 841 if (Memory.OffsetRegNum) return true; 842 // Immediate offset in range [-255, 255]. 843 if (!Memory.OffsetImm) return true; 844 int64_t Val = Memory.OffsetImm->getValue(); 845 return Val > -256 && Val < 256; 846 } 847 bool isAM3Offset() const { 848 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 849 return false; 850 if (Kind == k_PostIndexRegister) 851 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 852 // Immediate offset in range [-255, 255]. 853 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 854 if (!CE) return false; 855 int64_t Val = CE->getValue(); 856 // Special case, #-0 is INT32_MIN. 857 return (Val > -256 && Val < 256) || Val == INT32_MIN; 858 } 859 bool isAddrMode5() const { 860 // If we have an immediate that's not a constant, treat it as a label 861 // reference needing a fixup. If it is a constant, it's something else 862 // and we reject it. 
863 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 864 return true; 865 if (!isMemory() || Memory.Alignment != 0) return false; 866 // Check for register offset. 867 if (Memory.OffsetRegNum) return false; 868 // Immediate offset in range [-1020, 1020] and a multiple of 4. 869 if (!Memory.OffsetImm) return true; 870 int64_t Val = Memory.OffsetImm->getValue(); 871 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 872 Val == INT32_MIN; 873 } 874 bool isMemTBB() const { 875 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 876 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 877 return false; 878 return true; 879 } 880 bool isMemTBH() const { 881 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 882 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 883 Memory.Alignment != 0 ) 884 return false; 885 return true; 886 } 887 bool isMemRegOffset() const { 888 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 889 return false; 890 return true; 891 } 892 bool isT2MemRegOffset() const { 893 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 894 Memory.Alignment != 0) 895 return false; 896 // Only lsl #{0, 1, 2, 3} allowed. 897 if (Memory.ShiftType == ARM_AM::no_shift) 898 return true; 899 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 900 return false; 901 return true; 902 } 903 bool isMemThumbRR() const { 904 // Thumb reg+reg addressing is simple. Just two registers, a base and 905 // an offset. No shifts, negations or any other complicating factors. 
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Note: OffsetRegNum was already required non-zero above, so the
    // !Memory.OffsetRegNum disjunct below is always false here.
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
952 if (!Memory.OffsetImm) return true; 953 int64_t Val = Memory.OffsetImm->getValue(); 954 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 955 } 956 bool isMemImm0_1020s4Offset() const { 957 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 958 return false; 959 // Immediate offset a multiple of 4 in range [0, 1020]. 960 if (!Memory.OffsetImm) return true; 961 int64_t Val = Memory.OffsetImm->getValue(); 962 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 963 } 964 bool isMemImm8Offset() const { 965 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 966 return false; 967 // Immediate offset in range [-255, 255]. 968 if (!Memory.OffsetImm) return true; 969 int64_t Val = Memory.OffsetImm->getValue(); 970 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 971 } 972 bool isMemPosImm8Offset() const { 973 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 974 return false; 975 // Immediate offset in range [0, 255]. 976 if (!Memory.OffsetImm) return true; 977 int64_t Val = Memory.OffsetImm->getValue(); 978 return Val >= 0 && Val < 256; 979 } 980 bool isMemNegImm8Offset() const { 981 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 982 return false; 983 // Immediate offset in range [-255, -1]. 984 if (!Memory.OffsetImm) return false; 985 int64_t Val = Memory.OffsetImm->getValue(); 986 return (Val == INT32_MIN) || (Val > -256 && Val < 0); 987 } 988 bool isMemUImm12Offset() const { 989 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 990 return false; 991 // Immediate offset in range [0, 4095]. 992 if (!Memory.OffsetImm) return true; 993 int64_t Val = Memory.OffsetImm->getValue(); 994 return (Val >= 0 && Val < 4096); 995 } 996 bool isMemImm12Offset() const { 997 // If we have an immediate that's not a constant, treat it as a label 998 // reference needing a fixup. If it is a constant, it's something else 999 // and we reject it. 
1000 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 1001 return true; 1002 1003 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1004 return false; 1005 // Immediate offset in range [-4095, 4095]. 1006 if (!Memory.OffsetImm) return true; 1007 int64_t Val = Memory.OffsetImm->getValue(); 1008 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 1009 } 1010 bool isPostIdxImm8() const { 1011 if (Kind != k_Immediate) 1012 return false; 1013 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1014 if (!CE) return false; 1015 int64_t Val = CE->getValue(); 1016 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 1017 } 1018 bool isPostIdxImm8s4() const { 1019 if (Kind != k_Immediate) 1020 return false; 1021 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1022 if (!CE) return false; 1023 int64_t Val = CE->getValue(); 1024 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 1025 (Val == INT32_MIN); 1026 } 1027 1028 bool isMSRMask() const { return Kind == k_MSRMask; } 1029 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 1030 1031 // NEON operands. 1032 bool isVecListOneD() const { 1033 if (Kind != k_VectorList) return false; 1034 return VectorList.Count == 1; 1035 } 1036 1037 bool isVecListTwoD() const { 1038 if (Kind != k_VectorList) return false; 1039 return VectorList.Count == 2; 1040 } 1041 1042 bool isVecListThreeD() const { 1043 if (Kind != k_VectorList) return false; 1044 return VectorList.Count == 3; 1045 } 1046 1047 bool isVecListFourD() const { 1048 if (Kind != k_VectorList) return false; 1049 return VectorList.Count == 4; 1050 } 1051 1052 bool isVecListTwoQ() const { 1053 if (Kind != k_VectorList) return false; 1054 //FIXME: We haven't taught the parser to handle by-two register lists 1055 // yet, so don't pretend to know one. 
1056 return VectorList.Count == 2 && false; 1057 } 1058 1059 bool isVecListOneDAllLanes() const { 1060 if (Kind != k_VectorListAllLanes) return false; 1061 return VectorList.Count == 1; 1062 } 1063 1064 bool isVecListTwoDAllLanes() const { 1065 if (Kind != k_VectorListAllLanes) return false; 1066 return VectorList.Count == 2; 1067 } 1068 1069 bool isVecListOneDByteIndexed() const { 1070 if (Kind != k_VectorListIndexed) return false; 1071 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 1072 } 1073 1074 bool isVectorIndex8() const { 1075 if (Kind != k_VectorIndex) return false; 1076 return VectorIndex.Val < 8; 1077 } 1078 bool isVectorIndex16() const { 1079 if (Kind != k_VectorIndex) return false; 1080 return VectorIndex.Val < 4; 1081 } 1082 bool isVectorIndex32() const { 1083 if (Kind != k_VectorIndex) return false; 1084 return VectorIndex.Val < 2; 1085 } 1086 1087 bool isNEONi8splat() const { 1088 if (Kind != k_Immediate) 1089 return false; 1090 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1091 // Must be a constant. 1092 if (!CE) return false; 1093 int64_t Value = CE->getValue(); 1094 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 1095 // value. 1096 return Value >= 0 && Value < 256; 1097 } 1098 1099 bool isNEONi16splat() const { 1100 if (Kind != k_Immediate) 1101 return false; 1102 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1103 // Must be a constant. 1104 if (!CE) return false; 1105 int64_t Value = CE->getValue(); 1106 // i16 value in the range [0,255] or [0x0100, 0xff00] 1107 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1108 } 1109 1110 bool isNEONi32splat() const { 1111 if (Kind != k_Immediate) 1112 return false; 1113 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1114 // Must be a constant. 1115 if (!CE) return false; 1116 int64_t Value = CE->getValue(); 1117 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 
1118 return (Value >= 0 && Value < 256) || 1119 (Value >= 0x0100 && Value <= 0xff00) || 1120 (Value >= 0x010000 && Value <= 0xff0000) || 1121 (Value >= 0x01000000 && Value <= 0xff000000); 1122 } 1123 1124 bool isNEONi32vmov() const { 1125 if (Kind != k_Immediate) 1126 return false; 1127 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1128 // Must be a constant. 1129 if (!CE) return false; 1130 int64_t Value = CE->getValue(); 1131 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1132 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1133 return (Value >= 0 && Value < 256) || 1134 (Value >= 0x0100 && Value <= 0xff00) || 1135 (Value >= 0x010000 && Value <= 0xff0000) || 1136 (Value >= 0x01000000 && Value <= 0xff000000) || 1137 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1138 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1139 } 1140 1141 bool isNEONi64splat() const { 1142 if (Kind != k_Immediate) 1143 return false; 1144 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1145 // Must be a constant. 1146 if (!CE) return false; 1147 uint64_t Value = CE->getValue(); 1148 // i64 value with each byte being either 0 or 0xff. 1149 for (unsigned i = 0; i < 8; ++i) 1150 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1151 return true; 1152 } 1153 1154 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1155 // Add as immediates when possible. Null MCExpr = 0. 
1156 if (Expr == 0) 1157 Inst.addOperand(MCOperand::CreateImm(0)); 1158 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1159 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1160 else 1161 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1162 } 1163 1164 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1165 assert(N == 2 && "Invalid number of operands!"); 1166 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1167 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; 1168 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1169 } 1170 1171 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1172 assert(N == 1 && "Invalid number of operands!"); 1173 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1174 } 1175 1176 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1177 assert(N == 1 && "Invalid number of operands!"); 1178 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1179 } 1180 1181 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1182 assert(N == 1 && "Invalid number of operands!"); 1183 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1184 } 1185 1186 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1187 assert(N == 1 && "Invalid number of operands!"); 1188 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1189 } 1190 1191 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1192 assert(N == 1 && "Invalid number of operands!"); 1193 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1194 } 1195 1196 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1197 assert(N == 1 && "Invalid number of operands!"); 1198 Inst.addOperand(MCOperand::CreateReg(getReg())); 1199 } 1200 1201 void addRegOperands(MCInst &Inst, unsigned N) const { 1202 assert(N == 1 && "Invalid number of operands!"); 1203 Inst.addOperand(MCOperand::CreateReg(getReg())); 1204 } 1205 1206 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1207 assert(N == 3 && 
"Invalid number of operands!"); 1208 assert(isRegShiftedReg() && 1209 "addRegShiftedRegOperands() on non RegShiftedReg!"); 1210 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1211 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1212 Inst.addOperand(MCOperand::CreateImm( 1213 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1214 } 1215 1216 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1217 assert(N == 2 && "Invalid number of operands!"); 1218 assert(isRegShiftedImm() && 1219 "addRegShiftedImmOperands() on non RegShiftedImm!"); 1220 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1221 Inst.addOperand(MCOperand::CreateImm( 1222 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1223 } 1224 1225 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1226 assert(N == 1 && "Invalid number of operands!"); 1227 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1228 ShifterImm.Imm)); 1229 } 1230 1231 void addRegListOperands(MCInst &Inst, unsigned N) const { 1232 assert(N == 1 && "Invalid number of operands!"); 1233 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1234 for (SmallVectorImpl<unsigned>::const_iterator 1235 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1236 Inst.addOperand(MCOperand::CreateReg(*I)); 1237 } 1238 1239 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1240 addRegListOperands(Inst, N); 1241 } 1242 1243 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1244 addRegListOperands(Inst, N); 1245 } 1246 1247 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1248 assert(N == 1 && "Invalid number of operands!"); 1249 // Encoded as val>>3. The printer handles display as 8, 16, 24. 
1250 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1251 } 1252 1253 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1254 assert(N == 1 && "Invalid number of operands!"); 1255 // Munge the lsb/width into a bitfield mask. 1256 unsigned lsb = Bitfield.LSB; 1257 unsigned width = Bitfield.Width; 1258 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 1259 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1260 (32 - (lsb + width))); 1261 Inst.addOperand(MCOperand::CreateImm(Mask)); 1262 } 1263 1264 void addImmOperands(MCInst &Inst, unsigned N) const { 1265 assert(N == 1 && "Invalid number of operands!"); 1266 addExpr(Inst, getImm()); 1267 } 1268 1269 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1270 assert(N == 1 && "Invalid number of operands!"); 1271 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1272 } 1273 1274 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1275 assert(N == 1 && "Invalid number of operands!"); 1276 // FIXME: We really want to scale the value here, but the LDRD/STRD 1277 // instruction don't encode operands that way yet. 1278 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1279 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1280 } 1281 1282 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1283 assert(N == 1 && "Invalid number of operands!"); 1284 // The immediate is scaled by four in the encoding and is stored 1285 // in the MCInst as such. Lop off the low two bits here. 1286 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1287 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1288 } 1289 1290 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1291 assert(N == 1 && "Invalid number of operands!"); 1292 // The immediate is scaled by four in the encoding and is stored 1293 // in the MCInst as such. Lop off the low two bits here. 
1294 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1295 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1296 } 1297 1298 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1299 assert(N == 1 && "Invalid number of operands!"); 1300 // The constant encodes as the immediate-1, and we store in the instruction 1301 // the bits as encoded, so subtract off one here. 1302 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1303 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1304 } 1305 1306 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1307 assert(N == 1 && "Invalid number of operands!"); 1308 // The constant encodes as the immediate-1, and we store in the instruction 1309 // the bits as encoded, so subtract off one here. 1310 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1311 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1312 } 1313 1314 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1315 assert(N == 1 && "Invalid number of operands!"); 1316 // The constant encodes as the immediate, except for 32, which encodes as 1317 // zero. 1318 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1319 unsigned Imm = CE->getValue(); 1320 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1321 } 1322 1323 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1324 assert(N == 1 && "Invalid number of operands!"); 1325 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1326 // the instruction as well. 1327 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1328 int Val = CE->getValue(); 1329 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 1330 } 1331 1332 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 1333 assert(N == 1 && "Invalid number of operands!"); 1334 // The operand is actually a t2_so_imm, but we have its bitwise 1335 // negation in the assembly source, so twiddle it here. 
1336 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1337 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1338 } 1339 1340 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { 1341 assert(N == 1 && "Invalid number of operands!"); 1342 // The operand is actually a t2_so_imm, but we have its 1343 // negation in the assembly source, so twiddle it here. 1344 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1345 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1346 } 1347 1348 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 1349 assert(N == 1 && "Invalid number of operands!"); 1350 // The operand is actually a so_imm, but we have its bitwise 1351 // negation in the assembly source, so twiddle it here. 1352 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1353 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1354 } 1355 1356 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const { 1357 assert(N == 1 && "Invalid number of operands!"); 1358 // The operand is actually a so_imm, but we have its 1359 // negation in the assembly source, so twiddle it here. 
1360 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1361 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1362 } 1363 1364 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1365 assert(N == 1 && "Invalid number of operands!"); 1366 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1367 } 1368 1369 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1370 assert(N == 1 && "Invalid number of operands!"); 1371 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1372 } 1373 1374 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1375 assert(N == 2 && "Invalid number of operands!"); 1376 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1377 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1378 } 1379 1380 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1381 assert(N == 3 && "Invalid number of operands!"); 1382 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1383 if (!Memory.OffsetRegNum) { 1384 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1385 // Special case for #-0 1386 if (Val == INT32_MIN) Val = 0; 1387 if (Val < 0) Val = -Val; 1388 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1389 } else { 1390 // For register offset, we encode the shift type and negation flag 1391 // here. 1392 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1393 Memory.ShiftImm, Memory.ShiftType); 1394 } 1395 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1396 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1397 Inst.addOperand(MCOperand::CreateImm(Val)); 1398 } 1399 1400 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1401 assert(N == 2 && "Invalid number of operands!"); 1402 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1403 assert(CE && "non-constant AM2OffsetImm operand!"); 1404 int32_t Val = CE->getValue(); 1405 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1406 // Special case for #-0 1407 if (Val == INT32_MIN) Val = 0; 1408 if (Val < 0) Val = -Val; 1409 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1410 Inst.addOperand(MCOperand::CreateReg(0)); 1411 Inst.addOperand(MCOperand::CreateImm(Val)); 1412 } 1413 1414 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1415 assert(N == 3 && "Invalid number of operands!"); 1416 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1417 if (!Memory.OffsetRegNum) { 1418 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1419 // Special case for #-0 1420 if (Val == INT32_MIN) Val = 0; 1421 if (Val < 0) Val = -Val; 1422 Val = ARM_AM::getAM3Opc(AddSub, Val); 1423 } else { 1424 // For register offset, we encode the shift type and negation flag 1425 // here. 1426 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1427 } 1428 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1429 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1430 Inst.addOperand(MCOperand::CreateImm(Val)); 1431 } 1432 1433 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1434 assert(N == 2 && "Invalid number of operands!"); 1435 if (Kind == k_PostIndexRegister) { 1436 int32_t Val = 1437 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 1438 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1439 Inst.addOperand(MCOperand::CreateImm(Val)); 1440 return; 1441 } 1442 1443 // Constant offset. 1444 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1445 int32_t Val = CE->getValue(); 1446 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1447 // Special case for #-0 1448 if (Val == INT32_MIN) Val = 0; 1449 if (Val < 0) Val = -Val; 1450 Val = ARM_AM::getAM3Opc(AddSub, Val); 1451 Inst.addOperand(MCOperand::CreateReg(0)); 1452 Inst.addOperand(MCOperand::CreateImm(Val)); 1453 } 1454 1455 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1456 assert(N == 2 && "Invalid number of operands!"); 1457 // If we have an immediate that's not a constant, treat it as a label 1458 // reference needing a fixup. If it is a constant, it's something else 1459 // and we reject it. 1460 if (isImm()) { 1461 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1462 Inst.addOperand(MCOperand::CreateImm(0)); 1463 return; 1464 } 1465 1466 // The lower two bits are always zero and as such are not encoded. 1467 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1468 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1469 // Special case for #-0 1470 if (Val == INT32_MIN) Val = 0; 1471 if (Val < 0) Val = -Val; 1472 Val = ARM_AM::getAM5Opc(AddSub, Val); 1473 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1474 Inst.addOperand(MCOperand::CreateImm(Val)); 1475 } 1476 1477 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1478 assert(N == 2 && "Invalid number of operands!"); 1479 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1480 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1481 Inst.addOperand(MCOperand::CreateImm(Val)); 1482 } 1483 1484 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1485 assert(N == 2 && "Invalid number of operands!"); 1486 // The lower two bits are always zero and as such are not encoded. 1487 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1488 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1489 Inst.addOperand(MCOperand::CreateImm(Val)); 1490 } 1491 1492 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1493 assert(N == 2 && "Invalid number of operands!"); 1494 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1495 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1496 Inst.addOperand(MCOperand::CreateImm(Val)); 1497 } 1498 1499 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1500 addMemImm8OffsetOperands(Inst, N); 1501 } 1502 1503 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1504 addMemImm8OffsetOperands(Inst, N); 1505 } 1506 1507 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1508 assert(N == 2 && "Invalid number of operands!"); 1509 // If this is an immediate, it's a label reference. 1510 if (Kind == k_Immediate) { 1511 addExpr(Inst, getImm()); 1512 Inst.addOperand(MCOperand::CreateImm(0)); 1513 return; 1514 } 1515 1516 // Otherwise, it's a normal memory reg+offset. 1517 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1518 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1519 Inst.addOperand(MCOperand::CreateImm(Val)); 1520 } 1521 1522 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1523 assert(N == 2 && "Invalid number of operands!"); 1524 // If this is an immediate, it's a label reference. 1525 if (Kind == k_Immediate) { 1526 addExpr(Inst, getImm()); 1527 Inst.addOperand(MCOperand::CreateImm(0)); 1528 return; 1529 } 1530 1531 // Otherwise, it's a normal memory reg+offset. 1532 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1533 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1534 Inst.addOperand(MCOperand::CreateImm(Val)); 1535 } 1536 1537 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1538 assert(N == 2 && "Invalid number of operands!"); 1539 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1540 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1541 } 1542 1543 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1544 assert(N == 2 && "Invalid number of operands!"); 1545 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1546 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1547 } 1548 1549 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1550 assert(N == 3 && "Invalid number of operands!"); 1551 unsigned Val = 1552 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1553 Memory.ShiftImm, Memory.ShiftType); 1554 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1555 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1556 Inst.addOperand(MCOperand::CreateImm(Val)); 1557 } 1558 1559 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1560 assert(N == 3 && "Invalid number of operands!"); 1561 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1562 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1563 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1564 } 1565 1566 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1567 assert(N == 2 && "Invalid number of operands!"); 1568 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1569 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1570 } 1571 1572 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1573 assert(N == 2 && "Invalid number of operands!"); 1574 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1575 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1576 Inst.addOperand(MCOperand::CreateImm(Val)); 1577 } 1578 1579 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1580 assert(N == 2 && "Invalid number of operands!"); 1581 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1582 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1583 Inst.addOperand(MCOperand::CreateImm(Val)); 1584 } 1585 1586 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1587 assert(N == 2 && "Invalid number of operands!"); 1588 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1589 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1590 Inst.addOperand(MCOperand::CreateImm(Val)); 1591 } 1592 1593 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1594 assert(N == 2 && "Invalid number of operands!"); 1595 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1596 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1597 Inst.addOperand(MCOperand::CreateImm(Val)); 1598 } 1599 1600 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1601 assert(N == 1 && "Invalid number of operands!"); 1602 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1603 assert(CE && "non-constant post-idx-imm8 operand!"); 1604 int Imm = CE->getValue(); 1605 bool isAdd = Imm >= 0; 1606 if (Imm == INT32_MIN) Imm = 0; 1607 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1608 Inst.addOperand(MCOperand::CreateImm(Imm)); 1609 } 1610 1611 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1612 assert(N == 1 && "Invalid number of operands!"); 1613 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1614 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1615 int Imm = CE->getValue(); 1616 bool isAdd = Imm >= 0; 1617 if (Imm == INT32_MIN) Imm = 0; 1618 // Immediate is scaled by 4. 1619 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1620 Inst.addOperand(MCOperand::CreateImm(Imm)); 1621 } 1622 1623 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1624 assert(N == 2 && "Invalid number of operands!"); 1625 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1626 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1627 } 1628 1629 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1630 assert(N == 2 && "Invalid number of operands!"); 1631 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1632 // The sign, shift type, and shift amount are encoded in a single operand 1633 // using the AM2 encoding helpers. 1634 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1635 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1636 PostIdxReg.ShiftTy); 1637 Inst.addOperand(MCOperand::CreateImm(Imm)); 1638 } 1639 1640 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1641 assert(N == 1 && "Invalid number of operands!"); 1642 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1643 } 1644 1645 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1646 assert(N == 1 && "Invalid number of operands!"); 1647 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1648 } 1649 1650 void addVecListOperands(MCInst &Inst, unsigned N) const { 1651 assert(N == 1 && "Invalid number of operands!"); 1652 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1653 } 1654 1655 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const { 1656 assert(N == 2 && "Invalid number of operands!"); 1657 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1658 Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex)); 1659 } 1660 1661 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1662 assert(N == 1 && "Invalid number of operands!"); 1663 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1664 } 1665 1666 void addVectorIndex16Operands(MCInst &Inst, unsigned 
N) const { 1667 assert(N == 1 && "Invalid number of operands!"); 1668 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1669 } 1670 1671 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1672 assert(N == 1 && "Invalid number of operands!"); 1673 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1674 } 1675 1676 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1677 assert(N == 1 && "Invalid number of operands!"); 1678 // The immediate encodes the type of constant as well as the value. 1679 // Mask in that this is an i8 splat. 1680 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1681 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1682 } 1683 1684 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1685 assert(N == 1 && "Invalid number of operands!"); 1686 // The immediate encodes the type of constant as well as the value. 1687 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1688 unsigned Value = CE->getValue(); 1689 if (Value >= 256) 1690 Value = (Value >> 8) | 0xa00; 1691 else 1692 Value |= 0x800; 1693 Inst.addOperand(MCOperand::CreateImm(Value)); 1694 } 1695 1696 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1697 assert(N == 1 && "Invalid number of operands!"); 1698 // The immediate encodes the type of constant as well as the value. 1699 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1700 unsigned Value = CE->getValue(); 1701 if (Value >= 256 && Value <= 0xff00) 1702 Value = (Value >> 8) | 0x200; 1703 else if (Value > 0xffff && Value <= 0xff0000) 1704 Value = (Value >> 16) | 0x400; 1705 else if (Value > 0xffffff) 1706 Value = (Value >> 24) | 0x600; 1707 Inst.addOperand(MCOperand::CreateImm(Value)); 1708 } 1709 1710 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1711 assert(N == 1 && "Invalid number of operands!"); 1712 // The immediate encodes the type of constant as well as the value. 
    // (Continuation of an addNEONi*Operands method whose signature is above
    // this excerpt.)  The immediate encodes the constant's type as well as
    // its value; the bits OR'd in below select which byte position is
    // significant -- presumably the NEON "cmode" encoding; confirm against
    // the NEON modified-immediate tables.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  // Add a NEON 64-bit splat immediate operand.
  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    // Compress the value to one bit per byte: bit 0 of each byte is kept.
    // Assumes each byte is either 0x00 or 0xff (checked by the matching
    // is*() predicate -- presumably; confirm), so bit 0 suffices.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // --- Factory methods --------------------------------------------------
  // Each Create* helper allocates a new ARMOperand of the matching kind,
  // fills in the kind-specific payload, and records the source range
  // (kinds with no natural end location reuse S for both).  Callers own
  // the returned pointer.

  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Note: only the pointer/length are stored; Str must outlive the operand.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Register shifted by a register, e.g. "r0, lsl r1".
  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Register shifted by an immediate, e.g. "r0, lsl #2".
  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // The operand kind (GPR/DPR/SPR list) is chosen from the class of the
  // first register; registers are stored sorted by enum value.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // "Dn[]" all-lanes form of a vector list.
  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
                                              SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // "Dn[x]" single-lane form of a vector list.
  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
                                             unsigned Index, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Ctx is currently unused.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Ctx is currently unused.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // General memory operand: base register plus optional immediate or
  // (possibly shifted) register offset, alignment, and direction.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand
  *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

// Print a human-readable description of the operand (debugging aid).
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Indexed directly by the 4-bit IT condition mask value.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    // NOTE(review): the table has 15 entries but the assert admits mask
    // values up to 15 -- confirm a mask of 0b1111 cannot reach this print.
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is dumped; offset/shift/alignment are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): the opening '<' is missing here even though a closing
    // '>' is printed below; debug-output cosmetics only.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print each set flag's name, highest flag bit first.
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // The stored rotation appears to be in units of 8 bits, hence the
    // multiply -- TODO confirm against the CreateRotImm callers.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      // Comma-separate; '<' is valid since SmallVector iterators are pointers.
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS <<
"<vector_list(all lanes) " << VectorList.Count << " * " 2106 << VectorList.RegNum << ">"; 2107 break; 2108 case k_VectorListIndexed: 2109 OS << "<vector_list(lane " << VectorList.LaneIndex << ") " 2110 << VectorList.Count << " * " << VectorList.RegNum << ">"; 2111 break; 2112 case k_Token: 2113 OS << "'" << getToken() << "'"; 2114 break; 2115 case k_VectorIndex: 2116 OS << "<vectorindex " << getVectorIndex() << ">"; 2117 break; 2118 } 2119} 2120 2121/// @name Auto-generated Match Functions 2122/// { 2123 2124static unsigned MatchRegisterName(StringRef Name); 2125 2126/// } 2127 2128bool ARMAsmParser::ParseRegister(unsigned &RegNo, 2129 SMLoc &StartLoc, SMLoc &EndLoc) { 2130 RegNo = tryParseRegister(); 2131 2132 return (RegNo == (unsigned)-1); 2133} 2134 2135/// Try to parse a register name. The token must be an Identifier when called, 2136/// and if it is a register name the token is eaten and the register number is 2137/// returned. Otherwise return -1. 2138/// 2139int ARMAsmParser::tryParseRegister() { 2140 const AsmToken &Tok = Parser.getTok(); 2141 if (Tok.isNot(AsmToken::Identifier)) return -1; 2142 2143 // FIXME: Validate register for the current architecture; we have to do 2144 // validation later, so maybe there is no need for this here. 2145 std::string lowerCase = Tok.getString().lower(); 2146 unsigned RegNum = MatchRegisterName(lowerCase); 2147 if (!RegNum) { 2148 RegNum = StringSwitch<unsigned>(lowerCase) 2149 .Case("r13", ARM::SP) 2150 .Case("r14", ARM::LR) 2151 .Case("r15", ARM::PC) 2152 .Case("ip", ARM::R12) 2153 .Default(0); 2154 } 2155 if (!RegNum) return -1; 2156 2157 Parser.Lex(); // Eat identifier token. 2158 2159 return RegNum; 2160} 2161 2162// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2163// If a recoverable error occurs, return 1. If an irrecoverable error 2164// occurs, return -1. 
An irrecoverable error is one where tokens have been 2165// consumed in the process of trying to parse the shifter (i.e., when it is 2166// indeed a shifter operand, but malformed). 2167int ARMAsmParser::tryParseShiftRegister( 2168 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2169 SMLoc S = Parser.getTok().getLoc(); 2170 const AsmToken &Tok = Parser.getTok(); 2171 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2172 2173 std::string lowerCase = Tok.getString().lower(); 2174 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 2175 .Case("asl", ARM_AM::lsl) 2176 .Case("lsl", ARM_AM::lsl) 2177 .Case("lsr", ARM_AM::lsr) 2178 .Case("asr", ARM_AM::asr) 2179 .Case("ror", ARM_AM::ror) 2180 .Case("rrx", ARM_AM::rrx) 2181 .Default(ARM_AM::no_shift); 2182 2183 if (ShiftTy == ARM_AM::no_shift) 2184 return 1; 2185 2186 Parser.Lex(); // Eat the operator. 2187 2188 // The source register for the shift has already been added to the 2189 // operand list, so we need to pop it off and combine it into the shifted 2190 // register operand instead. 2191 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val()); 2192 if (!PrevOp->isReg()) 2193 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 2194 int SrcReg = PrevOp->getReg(); 2195 int64_t Imm = 0; 2196 int ShiftReg = 0; 2197 if (ShiftTy == ARM_AM::rrx) { 2198 // RRX Doesn't have an explicit shift amount. The encoder expects 2199 // the shift register to be the same as the source register. Seems odd, 2200 // but OK. 2201 ShiftReg = SrcReg; 2202 } else { 2203 // Figure out if this is shifted by a constant or a register (for non-RRX). 2204 if (Parser.getTok().is(AsmToken::Hash)) { 2205 Parser.Lex(); // Eat hash. 
2206 SMLoc ImmLoc = Parser.getTok().getLoc(); 2207 const MCExpr *ShiftExpr = 0; 2208 if (getParser().ParseExpression(ShiftExpr)) { 2209 Error(ImmLoc, "invalid immediate shift value"); 2210 return -1; 2211 } 2212 // The expression must be evaluatable as an immediate. 2213 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 2214 if (!CE) { 2215 Error(ImmLoc, "invalid immediate shift value"); 2216 return -1; 2217 } 2218 // Range check the immediate. 2219 // lsl, ror: 0 <= imm <= 31 2220 // lsr, asr: 0 <= imm <= 32 2221 Imm = CE->getValue(); 2222 if (Imm < 0 || 2223 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 2224 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 2225 Error(ImmLoc, "immediate shift value out of range"); 2226 return -1; 2227 } 2228 } else if (Parser.getTok().is(AsmToken::Identifier)) { 2229 ShiftReg = tryParseRegister(); 2230 SMLoc L = Parser.getTok().getLoc(); 2231 if (ShiftReg == -1) { 2232 Error (L, "expected immediate or register in shift operand"); 2233 return -1; 2234 } 2235 } else { 2236 Error (Parser.getTok().getLoc(), 2237 "expected immediate or register in shift operand"); 2238 return -1; 2239 } 2240 } 2241 2242 if (ShiftReg && ShiftTy != ARM_AM::rrx) 2243 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 2244 ShiftReg, Imm, 2245 S, Parser.getTok().getLoc())); 2246 else 2247 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 2248 S, Parser.getTok().getLoc())); 2249 2250 return 0; 2251} 2252 2253 2254/// Try to parse a register name. The token must be an Identifier when called. 2255/// If it's a register, an AsmOperand is created. Another AsmOperand is created 2256/// if there is a "writeback". 'true' if it's not a register. 2257/// 2258/// TODO this is likely to change to allow different register types and or to 2259/// parse for a specific register type. 
2260bool ARMAsmParser:: 2261tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2262 SMLoc S = Parser.getTok().getLoc(); 2263 int RegNo = tryParseRegister(); 2264 if (RegNo == -1) 2265 return true; 2266 2267 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2268 2269 const AsmToken &ExclaimTok = Parser.getTok(); 2270 if (ExclaimTok.is(AsmToken::Exclaim)) { 2271 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2272 ExclaimTok.getLoc())); 2273 Parser.Lex(); // Eat exclaim token 2274 return false; 2275 } 2276 2277 // Also check for an index operand. This is only legal for vector registers, 2278 // but that'll get caught OK in operand matching, so we don't need to 2279 // explicitly filter everything else out here. 2280 if (Parser.getTok().is(AsmToken::LBrac)) { 2281 SMLoc SIdx = Parser.getTok().getLoc(); 2282 Parser.Lex(); // Eat left bracket token. 2283 2284 const MCExpr *ImmVal; 2285 if (getParser().ParseExpression(ImmVal)) 2286 return MatchOperand_ParseFail; 2287 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2288 if (!MCE) { 2289 TokError("immediate value expected for vector index"); 2290 return MatchOperand_ParseFail; 2291 } 2292 2293 SMLoc E = Parser.getTok().getLoc(); 2294 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2295 Error(E, "']' expected"); 2296 return MatchOperand_ParseFail; 2297 } 2298 2299 Parser.Lex(); // Eat right bracket token. 2300 2301 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2302 SIdx, E, 2303 getContext())); 2304 } 2305 2306 return false; 2307} 2308 2309/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2310/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2311/// "c5", ... 2312static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2313 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2314 // but efficient. 
2315 switch (Name.size()) { 2316 default: break; 2317 case 2: 2318 if (Name[0] != CoprocOp) 2319 return -1; 2320 switch (Name[1]) { 2321 default: return -1; 2322 case '0': return 0; 2323 case '1': return 1; 2324 case '2': return 2; 2325 case '3': return 3; 2326 case '4': return 4; 2327 case '5': return 5; 2328 case '6': return 6; 2329 case '7': return 7; 2330 case '8': return 8; 2331 case '9': return 9; 2332 } 2333 break; 2334 case 3: 2335 if (Name[0] != CoprocOp || Name[1] != '1') 2336 return -1; 2337 switch (Name[2]) { 2338 default: return -1; 2339 case '0': return 10; 2340 case '1': return 11; 2341 case '2': return 12; 2342 case '3': return 13; 2343 case '4': return 14; 2344 case '5': return 15; 2345 } 2346 break; 2347 } 2348 2349 return -1; 2350} 2351 2352/// parseITCondCode - Try to parse a condition code for an IT instruction. 2353ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2354parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2355 SMLoc S = Parser.getTok().getLoc(); 2356 const AsmToken &Tok = Parser.getTok(); 2357 if (!Tok.is(AsmToken::Identifier)) 2358 return MatchOperand_NoMatch; 2359 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2360 .Case("eq", ARMCC::EQ) 2361 .Case("ne", ARMCC::NE) 2362 .Case("hs", ARMCC::HS) 2363 .Case("cs", ARMCC::HS) 2364 .Case("lo", ARMCC::LO) 2365 .Case("cc", ARMCC::LO) 2366 .Case("mi", ARMCC::MI) 2367 .Case("pl", ARMCC::PL) 2368 .Case("vs", ARMCC::VS) 2369 .Case("vc", ARMCC::VC) 2370 .Case("hi", ARMCC::HI) 2371 .Case("ls", ARMCC::LS) 2372 .Case("ge", ARMCC::GE) 2373 .Case("lt", ARMCC::LT) 2374 .Case("gt", ARMCC::GT) 2375 .Case("le", ARMCC::LE) 2376 .Case("al", ARMCC::AL) 2377 .Default(~0U); 2378 if (CC == ~0U) 2379 return MatchOperand_NoMatch; 2380 Parser.Lex(); // Eat the token. 2381 2382 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2383 2384 return MatchOperand_Success; 2385} 2386 2387/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2388/// token must be an Identifier when called, and if it is a coprocessor 2389/// number, the token is eaten and the operand is added to the operand list. 2390ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2391parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2392 SMLoc S = Parser.getTok().getLoc(); 2393 const AsmToken &Tok = Parser.getTok(); 2394 if (Tok.isNot(AsmToken::Identifier)) 2395 return MatchOperand_NoMatch; 2396 2397 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2398 if (Num == -1) 2399 return MatchOperand_NoMatch; 2400 2401 Parser.Lex(); // Eat identifier token. 2402 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2403 return MatchOperand_Success; 2404} 2405 2406/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2407/// token must be an Identifier when called, and if it is a coprocessor 2408/// number, the token is eaten and the operand is added to the operand list. 2409ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2410parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2411 SMLoc S = Parser.getTok().getLoc(); 2412 const AsmToken &Tok = Parser.getTok(); 2413 if (Tok.isNot(AsmToken::Identifier)) 2414 return MatchOperand_NoMatch; 2415 2416 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2417 if (Reg == -1) 2418 return MatchOperand_NoMatch; 2419 2420 Parser.Lex(); // Eat identifier token. 2421 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2422 return MatchOperand_Success; 2423} 2424 2425/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2426/// coproc_option : '{' imm0_255 '}' 2427ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2428parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2429 SMLoc S = Parser.getTok().getLoc(); 2430 2431 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2432 if (Parser.getTok().isNot(AsmToken::LCurly)) 2433 return MatchOperand_NoMatch; 2434 Parser.Lex(); // Eat the '{' 2435 2436 const MCExpr *Expr; 2437 SMLoc Loc = Parser.getTok().getLoc(); 2438 if (getParser().ParseExpression(Expr)) { 2439 Error(Loc, "illegal expression"); 2440 return MatchOperand_ParseFail; 2441 } 2442 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2443 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2444 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2445 return MatchOperand_ParseFail; 2446 } 2447 int Val = CE->getValue(); 2448 2449 // Check for and consume the closing '}' 2450 if (Parser.getTok().isNot(AsmToken::RCurly)) 2451 return MatchOperand_ParseFail; 2452 SMLoc E = Parser.getTok().getLoc(); 2453 Parser.Lex(); // Eat the '}' 2454 2455 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2456 return MatchOperand_Success; 2457} 2458 2459// For register list parsing, we need to map from raw GPR register numbering 2460// to the enumeration values. The enumeration values aren't sorted by 2461// register number due to our using "sp", "lr" and "pc" as canonical names. 2462static unsigned getNextRegister(unsigned Reg) { 2463 // If this is a GPR, we need to do it manually, otherwise we can rely 2464 // on the sort ordering of the enumeration since the other reg-classes 2465 // are sane. 
2466 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2467 return Reg + 1; 2468 switch(Reg) { 2469 default: assert(0 && "Invalid GPR number!"); 2470 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2471 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2472 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2473 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2474 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2475 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2476 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2477 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2478 } 2479} 2480 2481// Return the low-subreg of a given Q register. 2482static unsigned getDRegFromQReg(unsigned QReg) { 2483 switch (QReg) { 2484 default: llvm_unreachable("expected a Q register!"); 2485 case ARM::Q0: return ARM::D0; 2486 case ARM::Q1: return ARM::D2; 2487 case ARM::Q2: return ARM::D4; 2488 case ARM::Q3: return ARM::D6; 2489 case ARM::Q4: return ARM::D8; 2490 case ARM::Q5: return ARM::D10; 2491 case ARM::Q6: return ARM::D12; 2492 case ARM::Q7: return ARM::D14; 2493 case ARM::Q8: return ARM::D16; 2494 case ARM::Q9: return ARM::D18; 2495 case ARM::Q10: return ARM::D20; 2496 case ARM::Q11: return ARM::D22; 2497 case ARM::Q12: return ARM::D24; 2498 case ARM::Q13: return ARM::D26; 2499 case ARM::Q14: return ARM::D28; 2500 case ARM::Q15: return ARM::D30; 2501 } 2502} 2503 2504/// Parse a register list. 2505bool ARMAsmParser:: 2506parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2507 assert(Parser.getTok().is(AsmToken::LCurly) && 2508 "Token is not a Left Curly Brace"); 2509 SMLoc S = Parser.getTok().getLoc(); 2510 Parser.Lex(); // Eat '{' token. 2511 SMLoc RegLoc = Parser.getTok().getLoc(); 2512 2513 // Check the first register in the list to see what register class 2514 // this is a list of. 
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // The class of the first register determines the class every later
  // register must belong to.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.  (A degenerate range like "r3-r3" is accepted.)
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.  Comparison is done on raw
      // architectural numbers, not enum values (sp/lr/pc reorder the enum).
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    // A Q register contributes its second D sub-register as well.
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.
2607 2608 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2609 return false; 2610} 2611 2612// Helper function to parse the lane index for vector lists. 2613ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2614parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) { 2615 Index = 0; // Always return a defined index value. 2616 if (Parser.getTok().is(AsmToken::LBrac)) { 2617 Parser.Lex(); // Eat the '['. 2618 if (Parser.getTok().is(AsmToken::RBrac)) { 2619 // "Dn[]" is the 'all lanes' syntax. 2620 LaneKind = AllLanes; 2621 Parser.Lex(); // Eat the ']'. 2622 return MatchOperand_Success; 2623 } 2624 if (Parser.getTok().is(AsmToken::Integer)) { 2625 int64_t Val = Parser.getTok().getIntVal(); 2626 // Make this range check context sensitive for .8, .16, .32. 2627 if (Val < 0 && Val > 7) 2628 Error(Parser.getTok().getLoc(), "lane index out of range"); 2629 Index = Val; 2630 LaneKind = IndexedLane; 2631 Parser.Lex(); // Eat the token; 2632 if (Parser.getTok().isNot(AsmToken::RBrac)) 2633 Error(Parser.getTok().getLoc(), "']' expected"); 2634 Parser.Lex(); // Eat the ']'. 2635 return MatchOperand_Success; 2636 } 2637 Error(Parser.getTok().getLoc(), "lane index must be empty or an integer"); 2638 return MatchOperand_ParseFail; 2639 } 2640 LaneKind = NoLanes; 2641 return MatchOperand_Success; 2642} 2643 2644// parse a vector register list 2645ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2646parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2647 VectorLaneTy LaneKind; 2648 unsigned LaneIndex; 2649 SMLoc S = Parser.getTok().getLoc(); 2650 // As an extension (to match gas), support a plain D register or Q register 2651 // (without encosing curly braces) as a single or double entry list, 2652 // respectively. 
2653 if (Parser.getTok().is(AsmToken::Identifier)) { 2654 int Reg = tryParseRegister(); 2655 if (Reg == -1) 2656 return MatchOperand_NoMatch; 2657 SMLoc E = Parser.getTok().getLoc(); 2658 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) { 2659 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2660 if (Res != MatchOperand_Success) 2661 return Res; 2662 switch (LaneKind) { 2663 default: 2664 assert(0 && "unexpected lane kind!"); 2665 case NoLanes: 2666 E = Parser.getTok().getLoc(); 2667 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E)); 2668 break; 2669 case AllLanes: 2670 E = Parser.getTok().getLoc(); 2671 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E)); 2672 break; 2673 case IndexedLane: 2674 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1, 2675 LaneIndex, S,E)); 2676 break; 2677 } 2678 return MatchOperand_Success; 2679 } 2680 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2681 Reg = getDRegFromQReg(Reg); 2682 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2683 if (Res != MatchOperand_Success) 2684 return Res; 2685 switch (LaneKind) { 2686 default: 2687 assert(0 && "unexpected lane kind!"); 2688 case NoLanes: 2689 E = Parser.getTok().getLoc(); 2690 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E)); 2691 break; 2692 case AllLanes: 2693 E = Parser.getTok().getLoc(); 2694 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E)); 2695 break; 2696 case IndexedLane: 2697 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2, 2698 LaneIndex, S,E)); 2699 break; 2700 } 2701 return MatchOperand_Success; 2702 } 2703 Error(S, "vector register expected"); 2704 return MatchOperand_ParseFail; 2705 } 2706 2707 if (Parser.getTok().isNot(AsmToken::LCurly)) 2708 return MatchOperand_NoMatch; 2709 2710 Parser.Lex(); // Eat '{' token. 
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    ++Reg;
    ++Count;
  }
  // Every entry in the list must carry the same lane specifier (or none);
  // the first entry establishes the expected kind/index.
  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.  (Enum comparison is fine here;
      // the DPR enum is sorted by register number.)
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      EndLoc = Parser.getTok().getLoc();

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc EndLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register. Just check that it's contiguous and keep going.
    if (Reg != OldReg + 1) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  // Build the operand matching the lane style established by the first entry.
  switch (LaneKind) {
  default:
    assert(0 && "unexpected lane kind in register list.");
  case NoLanes:
    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
    break;
  case AllLanes:
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex, S, E));
    break;
  }
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
2851ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2852parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2853 SMLoc S = Parser.getTok().getLoc(); 2854 const AsmToken &Tok = Parser.getTok(); 2855 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2856 StringRef OptStr = Tok.getString(); 2857 2858 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2859 .Case("sy", ARM_MB::SY) 2860 .Case("st", ARM_MB::ST) 2861 .Case("sh", ARM_MB::ISH) 2862 .Case("ish", ARM_MB::ISH) 2863 .Case("shst", ARM_MB::ISHST) 2864 .Case("ishst", ARM_MB::ISHST) 2865 .Case("nsh", ARM_MB::NSH) 2866 .Case("un", ARM_MB::NSH) 2867 .Case("nshst", ARM_MB::NSHST) 2868 .Case("unst", ARM_MB::NSHST) 2869 .Case("osh", ARM_MB::OSH) 2870 .Case("oshst", ARM_MB::OSHST) 2871 .Default(~0U); 2872 2873 if (Opt == ~0U) 2874 return MatchOperand_NoMatch; 2875 2876 Parser.Lex(); // Eat identifier token. 2877 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2878 return MatchOperand_Success; 2879} 2880 2881/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2882ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2883parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2884 SMLoc S = Parser.getTok().getLoc(); 2885 const AsmToken &Tok = Parser.getTok(); 2886 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2887 StringRef IFlagsStr = Tok.getString(); 2888 2889 // An iflags string of "none" is interpreted to mean that none of the AIF 2890 // bits are set. Not a terribly useful instruction, but a valid encoding. 
2891 unsigned IFlags = 0; 2892 if (IFlagsStr != "none") { 2893 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2894 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2895 .Case("a", ARM_PROC::A) 2896 .Case("i", ARM_PROC::I) 2897 .Case("f", ARM_PROC::F) 2898 .Default(~0U); 2899 2900 // If some specific iflag is already set, it means that some letter is 2901 // present more than once, this is not acceptable. 2902 if (Flag == ~0U || (IFlags & Flag)) 2903 return MatchOperand_NoMatch; 2904 2905 IFlags |= Flag; 2906 } 2907 } 2908 2909 Parser.Lex(); // Eat identifier token. 2910 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2911 return MatchOperand_Success; 2912} 2913 2914/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 2915ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2916parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2917 SMLoc S = Parser.getTok().getLoc(); 2918 const AsmToken &Tok = Parser.getTok(); 2919 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2920 StringRef Mask = Tok.getString(); 2921 2922 if (isMClass()) { 2923 // See ARMv6-M 10.1.1 2924 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2925 .Case("apsr", 0) 2926 .Case("iapsr", 1) 2927 .Case("eapsr", 2) 2928 .Case("xpsr", 3) 2929 .Case("ipsr", 5) 2930 .Case("epsr", 6) 2931 .Case("iepsr", 7) 2932 .Case("msp", 8) 2933 .Case("psp", 9) 2934 .Case("primask", 16) 2935 .Case("basepri", 17) 2936 .Case("basepri_max", 18) 2937 .Case("faultmask", 19) 2938 .Case("control", 20) 2939 .Default(~0U); 2940 2941 if (FlagsVal == ~0U) 2942 return MatchOperand_NoMatch; 2943 2944 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2945 // basepri, basepri_max and faultmask only valid for V7m. 2946 return MatchOperand_NoMatch; 2947 2948 Parser.Lex(); // Eat identifier token. 
2949 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2950 return MatchOperand_Success; 2951 } 2952 2953 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2954 size_t Start = 0, Next = Mask.find('_'); 2955 StringRef Flags = ""; 2956 std::string SpecReg = Mask.slice(Start, Next).lower(); 2957 if (Next != StringRef::npos) 2958 Flags = Mask.slice(Next+1, Mask.size()); 2959 2960 // FlagsVal contains the complete mask: 2961 // 3-0: Mask 2962 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2963 unsigned FlagsVal = 0; 2964 2965 if (SpecReg == "apsr") { 2966 FlagsVal = StringSwitch<unsigned>(Flags) 2967 .Case("nzcvq", 0x8) // same as CPSR_f 2968 .Case("g", 0x4) // same as CPSR_s 2969 .Case("nzcvqg", 0xc) // same as CPSR_fs 2970 .Default(~0U); 2971 2972 if (FlagsVal == ~0U) { 2973 if (!Flags.empty()) 2974 return MatchOperand_NoMatch; 2975 else 2976 FlagsVal = 8; // No flag 2977 } 2978 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2979 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2980 Flags = "fc"; 2981 for (int i = 0, e = Flags.size(); i != e; ++i) { 2982 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2983 .Case("c", 1) 2984 .Case("x", 2) 2985 .Case("s", 4) 2986 .Case("f", 8) 2987 .Default(~0U); 2988 2989 // If some specific flag is already set, it means that some letter is 2990 // present more than once, this is not acceptable. 2991 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2992 return MatchOperand_NoMatch; 2993 FlagsVal |= Flag; 2994 } 2995 } else // No match for special register. 2996 return MatchOperand_NoMatch; 2997 2998 // Special register without flags is NOT equivalent to "fc" flags. 2999 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 3000 // two lines would enable gas compatibility at the expense of breaking 3001 // round-tripping. 
3002 // 3003 // if (!FlagsVal) 3004 // FlagsVal = 0x9; 3005 3006 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3007 if (SpecReg == "spsr") 3008 FlagsVal |= 16; 3009 3010 Parser.Lex(); // Eat identifier token. 3011 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3012 return MatchOperand_Success; 3013} 3014 3015ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3016parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 3017 int Low, int High) { 3018 const AsmToken &Tok = Parser.getTok(); 3019 if (Tok.isNot(AsmToken::Identifier)) { 3020 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3021 return MatchOperand_ParseFail; 3022 } 3023 StringRef ShiftName = Tok.getString(); 3024 std::string LowerOp = Op.lower(); 3025 std::string UpperOp = Op.upper(); 3026 if (ShiftName != LowerOp && ShiftName != UpperOp) { 3027 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3028 return MatchOperand_ParseFail; 3029 } 3030 Parser.Lex(); // Eat shift type token. 3031 3032 // There must be a '#' and a shift amount. 3033 if (Parser.getTok().isNot(AsmToken::Hash)) { 3034 Error(Parser.getTok().getLoc(), "'#' expected"); 3035 return MatchOperand_ParseFail; 3036 } 3037 Parser.Lex(); // Eat hash token. 
3038 3039 const MCExpr *ShiftAmount; 3040 SMLoc Loc = Parser.getTok().getLoc(); 3041 if (getParser().ParseExpression(ShiftAmount)) { 3042 Error(Loc, "illegal expression"); 3043 return MatchOperand_ParseFail; 3044 } 3045 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3046 if (!CE) { 3047 Error(Loc, "constant expression expected"); 3048 return MatchOperand_ParseFail; 3049 } 3050 int Val = CE->getValue(); 3051 if (Val < Low || Val > High) { 3052 Error(Loc, "immediate value out of range"); 3053 return MatchOperand_ParseFail; 3054 } 3055 3056 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 3057 3058 return MatchOperand_Success; 3059} 3060 3061ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3062parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3063 const AsmToken &Tok = Parser.getTok(); 3064 SMLoc S = Tok.getLoc(); 3065 if (Tok.isNot(AsmToken::Identifier)) { 3066 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3067 return MatchOperand_ParseFail; 3068 } 3069 int Val = StringSwitch<int>(Tok.getString()) 3070 .Case("be", 1) 3071 .Case("le", 0) 3072 .Default(-1); 3073 Parser.Lex(); // Eat the token. 3074 3075 if (Val == -1) { 3076 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3077 return MatchOperand_ParseFail; 3078 } 3079 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3080 getContext()), 3081 S, Parser.getTok().getLoc())); 3082 return MatchOperand_Success; 3083} 3084 3085/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3086/// instructions. Legal values are: 3087/// lsl #n 'n' in [0,31] 3088/// asr #n 'n' in [1,32] 3089/// n == 32 encoded as n == 0. 
3090ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3091parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3092 const AsmToken &Tok = Parser.getTok(); 3093 SMLoc S = Tok.getLoc(); 3094 if (Tok.isNot(AsmToken::Identifier)) { 3095 Error(S, "shift operator 'asr' or 'lsl' expected"); 3096 return MatchOperand_ParseFail; 3097 } 3098 StringRef ShiftName = Tok.getString(); 3099 bool isASR; 3100 if (ShiftName == "lsl" || ShiftName == "LSL") 3101 isASR = false; 3102 else if (ShiftName == "asr" || ShiftName == "ASR") 3103 isASR = true; 3104 else { 3105 Error(S, "shift operator 'asr' or 'lsl' expected"); 3106 return MatchOperand_ParseFail; 3107 } 3108 Parser.Lex(); // Eat the operator. 3109 3110 // A '#' and a shift amount. 3111 if (Parser.getTok().isNot(AsmToken::Hash)) { 3112 Error(Parser.getTok().getLoc(), "'#' expected"); 3113 return MatchOperand_ParseFail; 3114 } 3115 Parser.Lex(); // Eat hash token. 3116 3117 const MCExpr *ShiftAmount; 3118 SMLoc E = Parser.getTok().getLoc(); 3119 if (getParser().ParseExpression(ShiftAmount)) { 3120 Error(E, "malformed shift expression"); 3121 return MatchOperand_ParseFail; 3122 } 3123 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3124 if (!CE) { 3125 Error(E, "shift amount must be an immediate"); 3126 return MatchOperand_ParseFail; 3127 } 3128 3129 int64_t Val = CE->getValue(); 3130 if (isASR) { 3131 // Shift amount must be in [1,32] 3132 if (Val < 1 || Val > 32) { 3133 Error(E, "'asr' shift amount must be in range [1,32]"); 3134 return MatchOperand_ParseFail; 3135 } 3136 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3137 if (isThumb() && Val == 32) { 3138 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 3139 return MatchOperand_ParseFail; 3140 } 3141 if (Val == 32) Val = 0; 3142 } else { 3143 // Shift amount must be in [1,32] 3144 if (Val < 0 || Val > 31) { 3145 Error(E, "'lsr' shift amount must be in range [0,31]"); 3146 return MatchOperand_ParseFail; 3147 } 3148 } 3149 3150 E = Parser.getTok().getLoc(); 3151 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 3152 3153 return MatchOperand_Success; 3154} 3155 3156/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3157/// of instructions. Legal values are: 3158/// ror #n 'n' in {0, 8, 16, 24} 3159ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3160parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3161 const AsmToken &Tok = Parser.getTok(); 3162 SMLoc S = Tok.getLoc(); 3163 if (Tok.isNot(AsmToken::Identifier)) 3164 return MatchOperand_NoMatch; 3165 StringRef ShiftName = Tok.getString(); 3166 if (ShiftName != "ror" && ShiftName != "ROR") 3167 return MatchOperand_NoMatch; 3168 Parser.Lex(); // Eat the operator. 3169 3170 // A '#' and a rotate amount. 3171 if (Parser.getTok().isNot(AsmToken::Hash)) { 3172 Error(Parser.getTok().getLoc(), "'#' expected"); 3173 return MatchOperand_ParseFail; 3174 } 3175 Parser.Lex(); // Eat hash token. 3176 3177 const MCExpr *ShiftAmount; 3178 SMLoc E = Parser.getTok().getLoc(); 3179 if (getParser().ParseExpression(ShiftAmount)) { 3180 Error(E, "malformed rotate expression"); 3181 return MatchOperand_ParseFail; 3182 } 3183 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3184 if (!CE) { 3185 Error(E, "rotate amount must be an immediate"); 3186 return MatchOperand_ParseFail; 3187 } 3188 3189 int64_t Val = CE->getValue(); 3190 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 3191 // normally, zero is represented in asm by omitting the rotate operand 3192 // entirely. 
3193 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 3194 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 3195 return MatchOperand_ParseFail; 3196 } 3197 3198 E = Parser.getTok().getLoc(); 3199 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 3200 3201 return MatchOperand_Success; 3202} 3203 3204ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3205parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3206 SMLoc S = Parser.getTok().getLoc(); 3207 // The bitfield descriptor is really two operands, the LSB and the width. 3208 if (Parser.getTok().isNot(AsmToken::Hash)) { 3209 Error(Parser.getTok().getLoc(), "'#' expected"); 3210 return MatchOperand_ParseFail; 3211 } 3212 Parser.Lex(); // Eat hash token. 3213 3214 const MCExpr *LSBExpr; 3215 SMLoc E = Parser.getTok().getLoc(); 3216 if (getParser().ParseExpression(LSBExpr)) { 3217 Error(E, "malformed immediate expression"); 3218 return MatchOperand_ParseFail; 3219 } 3220 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 3221 if (!CE) { 3222 Error(E, "'lsb' operand must be an immediate"); 3223 return MatchOperand_ParseFail; 3224 } 3225 3226 int64_t LSB = CE->getValue(); 3227 // The LSB must be in the range [0,31] 3228 if (LSB < 0 || LSB > 31) { 3229 Error(E, "'lsb' operand must be in the range [0,31]"); 3230 return MatchOperand_ParseFail; 3231 } 3232 E = Parser.getTok().getLoc(); 3233 3234 // Expect another immediate operand. 3235 if (Parser.getTok().isNot(AsmToken::Comma)) { 3236 Error(Parser.getTok().getLoc(), "too few operands"); 3237 return MatchOperand_ParseFail; 3238 } 3239 Parser.Lex(); // Eat hash token. 3240 if (Parser.getTok().isNot(AsmToken::Hash)) { 3241 Error(Parser.getTok().getLoc(), "'#' expected"); 3242 return MatchOperand_ParseFail; 3243 } 3244 Parser.Lex(); // Eat hash token. 
3245 3246 const MCExpr *WidthExpr; 3247 if (getParser().ParseExpression(WidthExpr)) { 3248 Error(E, "malformed immediate expression"); 3249 return MatchOperand_ParseFail; 3250 } 3251 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3252 if (!CE) { 3253 Error(E, "'width' operand must be an immediate"); 3254 return MatchOperand_ParseFail; 3255 } 3256 3257 int64_t Width = CE->getValue(); 3258 // The LSB must be in the range [1,32-lsb] 3259 if (Width < 1 || Width > 32 - LSB) { 3260 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3261 return MatchOperand_ParseFail; 3262 } 3263 E = Parser.getTok().getLoc(); 3264 3265 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3266 3267 return MatchOperand_Success; 3268} 3269 3270ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3271parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3272 // Check for a post-index addressing register operand. Specifically: 3273 // postidx_reg := '+' register {, shift} 3274 // | '-' register {, shift} 3275 // | register {, shift} 3276 3277 // This method must return MatchOperand_NoMatch without consuming any tokens 3278 // in the case where there is no match, as other alternatives take other 3279 // parse methods. 3280 AsmToken Tok = Parser.getTok(); 3281 SMLoc S = Tok.getLoc(); 3282 bool haveEaten = false; 3283 bool isAdd = true; 3284 int Reg = -1; 3285 if (Tok.is(AsmToken::Plus)) { 3286 Parser.Lex(); // Eat the '+' token. 3287 haveEaten = true; 3288 } else if (Tok.is(AsmToken::Minus)) { 3289 Parser.Lex(); // Eat the '-' token. 
3290 isAdd = false; 3291 haveEaten = true; 3292 } 3293 if (Parser.getTok().is(AsmToken::Identifier)) 3294 Reg = tryParseRegister(); 3295 if (Reg == -1) { 3296 if (!haveEaten) 3297 return MatchOperand_NoMatch; 3298 Error(Parser.getTok().getLoc(), "register expected"); 3299 return MatchOperand_ParseFail; 3300 } 3301 SMLoc E = Parser.getTok().getLoc(); 3302 3303 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3304 unsigned ShiftImm = 0; 3305 if (Parser.getTok().is(AsmToken::Comma)) { 3306 Parser.Lex(); // Eat the ','. 3307 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3308 return MatchOperand_ParseFail; 3309 } 3310 3311 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3312 ShiftImm, S, E)); 3313 3314 return MatchOperand_Success; 3315} 3316 3317ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3318parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3319 // Check for a post-index addressing register operand. Specifically: 3320 // am3offset := '+' register 3321 // | '-' register 3322 // | register 3323 // | # imm 3324 // | # + imm 3325 // | # - imm 3326 3327 // This method must return MatchOperand_NoMatch without consuming any tokens 3328 // in the case where there is no match, as other alternatives take other 3329 // parse methods. 3330 AsmToken Tok = Parser.getTok(); 3331 SMLoc S = Tok.getLoc(); 3332 3333 // Do immediates first, as we always parse those if we have a '#'. 3334 if (Parser.getTok().is(AsmToken::Hash)) { 3335 Parser.Lex(); // Eat the '#'. 3336 // Explicitly look for a '-', as we need to encode negative zero 3337 // differently. 
3338 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3339 const MCExpr *Offset; 3340 if (getParser().ParseExpression(Offset)) 3341 return MatchOperand_ParseFail; 3342 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3343 if (!CE) { 3344 Error(S, "constant expression expected"); 3345 return MatchOperand_ParseFail; 3346 } 3347 SMLoc E = Tok.getLoc(); 3348 // Negative zero is encoded as the flag value INT32_MIN. 3349 int32_t Val = CE->getValue(); 3350 if (isNegative && Val == 0) 3351 Val = INT32_MIN; 3352 3353 Operands.push_back( 3354 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3355 3356 return MatchOperand_Success; 3357 } 3358 3359 3360 bool haveEaten = false; 3361 bool isAdd = true; 3362 int Reg = -1; 3363 if (Tok.is(AsmToken::Plus)) { 3364 Parser.Lex(); // Eat the '+' token. 3365 haveEaten = true; 3366 } else if (Tok.is(AsmToken::Minus)) { 3367 Parser.Lex(); // Eat the '-' token. 3368 isAdd = false; 3369 haveEaten = true; 3370 } 3371 if (Parser.getTok().is(AsmToken::Identifier)) 3372 Reg = tryParseRegister(); 3373 if (Reg == -1) { 3374 if (!haveEaten) 3375 return MatchOperand_NoMatch; 3376 Error(Parser.getTok().getLoc(), "register expected"); 3377 return MatchOperand_ParseFail; 3378 } 3379 SMLoc E = Parser.getTok().getLoc(); 3380 3381 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3382 0, S, E)); 3383 3384 return MatchOperand_Success; 3385} 3386 3387/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3388/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3389/// when they refer multiple MIOperands inside a single one. 3390bool ARMAsmParser:: 3391cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3392 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3393 // Rt, Rt2 3394 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3395 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3396 // Create a writeback register dummy placeholder. 
3397 Inst.addOperand(MCOperand::CreateReg(0)); 3398 // addr 3399 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3400 // pred 3401 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3402 return true; 3403} 3404 3405/// cvtT2StrdPre - Convert parsed operands to MCInst. 3406/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3407/// when they refer multiple MIOperands inside a single one. 3408bool ARMAsmParser:: 3409cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3410 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3411 // Create a writeback register dummy placeholder. 3412 Inst.addOperand(MCOperand::CreateReg(0)); 3413 // Rt, Rt2 3414 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3415 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3416 // addr 3417 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3418 // pred 3419 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3420 return true; 3421} 3422 3423/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3424/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3425/// when they refer multiple MIOperands inside a single one. 3426bool ARMAsmParser:: 3427cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3428 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3429 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3430 3431 // Create a writeback register dummy placeholder. 3432 Inst.addOperand(MCOperand::CreateImm(0)); 3433 3434 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3435 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3436 return true; 3437} 3438 3439/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3440/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3441/// when they refer multiple MIOperands inside a single one. 
3442bool ARMAsmParser:: 3443cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3444 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3445 // Create a writeback register dummy placeholder. 3446 Inst.addOperand(MCOperand::CreateImm(0)); 3447 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3448 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3449 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3450 return true; 3451} 3452 3453/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3454/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3455/// when they refer multiple MIOperands inside a single one. 3456bool ARMAsmParser:: 3457cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3458 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3459 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3460 3461 // Create a writeback register dummy placeholder. 3462 Inst.addOperand(MCOperand::CreateImm(0)); 3463 3464 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3465 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3466 return true; 3467} 3468 3469/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3470/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3471/// when they refer multiple MIOperands inside a single one. 3472bool ARMAsmParser:: 3473cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3474 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3475 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3476 3477 // Create a writeback register dummy placeholder. 3478 Inst.addOperand(MCOperand::CreateImm(0)); 3479 3480 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3481 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3482 return true; 3483} 3484 3485 3486/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3487/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3488/// when they refer multiple MIOperands inside a single one. 3489bool ARMAsmParser:: 3490cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3491 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3492 // Create a writeback register dummy placeholder. 3493 Inst.addOperand(MCOperand::CreateImm(0)); 3494 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3495 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3496 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3497 return true; 3498} 3499 3500/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3501/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3502/// when they refer multiple MIOperands inside a single one. 3503bool ARMAsmParser:: 3504cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3505 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3506 // Create a writeback register dummy placeholder. 3507 Inst.addOperand(MCOperand::CreateImm(0)); 3508 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3509 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3510 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3511 return true; 3512} 3513 3514/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3515/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3516/// when they refer multiple MIOperands inside a single one. 3517bool ARMAsmParser:: 3518cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3519 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3520 // Create a writeback register dummy placeholder. 
3521 Inst.addOperand(MCOperand::CreateImm(0)); 3522 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3523 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3524 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3525 return true; 3526} 3527 3528/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3529/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3530/// when they refer multiple MIOperands inside a single one. 3531bool ARMAsmParser:: 3532cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3533 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3534 // Rt 3535 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3536 // Create a writeback register dummy placeholder. 3537 Inst.addOperand(MCOperand::CreateImm(0)); 3538 // addr 3539 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3540 // offset 3541 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3542 // pred 3543 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3544 return true; 3545} 3546 3547/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3548/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3549/// when they refer multiple MIOperands inside a single one. 3550bool ARMAsmParser:: 3551cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3552 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3553 // Rt 3554 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3555 // Create a writeback register dummy placeholder. 3556 Inst.addOperand(MCOperand::CreateImm(0)); 3557 // addr 3558 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3559 // offset 3560 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3561 // pred 3562 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3563 return true; 3564} 3565 3566/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3567/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3568/// when they refer multiple MIOperands inside a single one. 3569bool ARMAsmParser:: 3570cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3571 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3572 // Create a writeback register dummy placeholder. 3573 Inst.addOperand(MCOperand::CreateImm(0)); 3574 // Rt 3575 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3576 // addr 3577 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3578 // offset 3579 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3580 // pred 3581 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3582 return true; 3583} 3584 3585/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3586/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3587/// when they refer multiple MIOperands inside a single one. 3588bool ARMAsmParser:: 3589cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3590 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3591 // Create a writeback register dummy placeholder. 3592 Inst.addOperand(MCOperand::CreateImm(0)); 3593 // Rt 3594 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3595 // addr 3596 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3597 // offset 3598 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3599 // pred 3600 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3601 return true; 3602} 3603 3604/// cvtLdrdPre - Convert parsed operands to MCInst. 3605/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3606/// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2 — for a load, the destination registers come first.
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (addrmode3: base reg + offset reg + imm, three MC operands)
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder. For a store, the
  // writeback result register leads, then the sources.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (addrmode3)
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
///
/// Layout of Operands for the three-operand form (size == 6):
/// [0] mnemonic, [1] cc_out, [2] predicate, [3] Rd, [4] Rn, [5] Rm.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand. Reject the three-operand form when Rd matches neither source.
  // NOTE: unlike the other cvt* helpers, this one returns false to report
  // the failure to the caller.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
      ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rm is tied to Rd: re-emit MC operand 0 as the second source.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

/// cvtVLDwbFixed - VLD with writeback, fixed (no register) post-increment.
/// Operands: [1] predicate, [3] vector list, [4] aligned memory.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVLDwbRegister - VLD with register-increment writeback.
/// Same as cvtVLDwbFixed plus the increment register in Operands[5].
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbFixed - VST with writeback; store emits the memory operand
/// before the vector list (mirror of the VLD ordering).
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbRegister - VST with register-increment writeback.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression, return false if successful else return true
The first token must be a '[' when called. 3749bool ARMAsmParser:: 3750parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3751 SMLoc S, E; 3752 assert(Parser.getTok().is(AsmToken::LBrac) && 3753 "Token is not a Left Bracket"); 3754 S = Parser.getTok().getLoc(); 3755 Parser.Lex(); // Eat left bracket token. 3756 3757 const AsmToken &BaseRegTok = Parser.getTok(); 3758 int BaseRegNum = tryParseRegister(); 3759 if (BaseRegNum == -1) 3760 return Error(BaseRegTok.getLoc(), "register expected"); 3761 3762 // The next token must either be a comma or a closing bracket. 3763 const AsmToken &Tok = Parser.getTok(); 3764 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3765 return Error(Tok.getLoc(), "malformed memory operand"); 3766 3767 if (Tok.is(AsmToken::RBrac)) { 3768 E = Tok.getLoc(); 3769 Parser.Lex(); // Eat right bracket token. 3770 3771 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3772 0, 0, false, S, E)); 3773 3774 // If there's a pre-indexing writeback marker, '!', just add it as a token 3775 // operand. It's rather odd, but syntactically valid. 3776 if (Parser.getTok().is(AsmToken::Exclaim)) { 3777 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3778 Parser.Lex(); // Eat the '!'. 3779 } 3780 3781 return false; 3782 } 3783 3784 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3785 Parser.Lex(); // Eat the comma. 3786 3787 // If we have a ':', it's an alignment specifier. 3788 if (Parser.getTok().is(AsmToken::Colon)) { 3789 Parser.Lex(); // Eat the ':'. 3790 E = Parser.getTok().getLoc(); 3791 3792 const MCExpr *Expr; 3793 if (getParser().ParseExpression(Expr)) 3794 return true; 3795 3796 // The expression has to be a constant. Memory references with relocations 3797 // don't come through here, as they use the <label> forms of the relevant 3798 // instructions. 
3799 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3800 if (!CE) 3801 return Error (E, "constant expression expected"); 3802 3803 unsigned Align = 0; 3804 switch (CE->getValue()) { 3805 default: 3806 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3807 case 64: Align = 8; break; 3808 case 128: Align = 16; break; 3809 case 256: Align = 32; break; 3810 } 3811 3812 // Now we should have the closing ']' 3813 E = Parser.getTok().getLoc(); 3814 if (Parser.getTok().isNot(AsmToken::RBrac)) 3815 return Error(E, "']' expected"); 3816 Parser.Lex(); // Eat right bracket token. 3817 3818 // Don't worry about range checking the value here. That's handled by 3819 // the is*() predicates. 3820 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3821 ARM_AM::no_shift, 0, Align, 3822 false, S, E)); 3823 3824 // If there's a pre-indexing writeback marker, '!', just add it as a token 3825 // operand. 3826 if (Parser.getTok().is(AsmToken::Exclaim)) { 3827 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3828 Parser.Lex(); // Eat the '!'. 3829 } 3830 3831 return false; 3832 } 3833 3834 // If we have a '#', it's an immediate offset, else assume it's a register 3835 // offset. Be friendly and also accept a plain integer (without a leading 3836 // hash) for gas compatibility. 3837 if (Parser.getTok().is(AsmToken::Hash) || 3838 Parser.getTok().is(AsmToken::Integer)) { 3839 if (Parser.getTok().is(AsmToken::Hash)) 3840 Parser.Lex(); // Eat the '#'. 3841 E = Parser.getTok().getLoc(); 3842 3843 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3844 const MCExpr *Offset; 3845 if (getParser().ParseExpression(Offset)) 3846 return true; 3847 3848 // The expression has to be a constant. Memory references with relocations 3849 // don't come through here, as they use the <label> forms of the relevant 3850 // instructions. 
3851 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3852 if (!CE) 3853 return Error (E, "constant expression expected"); 3854 3855 // If the constant was #-0, represent it as INT32_MIN. 3856 int32_t Val = CE->getValue(); 3857 if (isNegative && Val == 0) 3858 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3859 3860 // Now we should have the closing ']' 3861 E = Parser.getTok().getLoc(); 3862 if (Parser.getTok().isNot(AsmToken::RBrac)) 3863 return Error(E, "']' expected"); 3864 Parser.Lex(); // Eat right bracket token. 3865 3866 // Don't worry about range checking the value here. That's handled by 3867 // the is*() predicates. 3868 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3869 ARM_AM::no_shift, 0, 0, 3870 false, S, E)); 3871 3872 // If there's a pre-indexing writeback marker, '!', just add it as a token 3873 // operand. 3874 if (Parser.getTok().is(AsmToken::Exclaim)) { 3875 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3876 Parser.Lex(); // Eat the '!'. 3877 } 3878 3879 return false; 3880 } 3881 3882 // The register offset is optionally preceded by a '+' or '-' 3883 bool isNegative = false; 3884 if (Parser.getTok().is(AsmToken::Minus)) { 3885 isNegative = true; 3886 Parser.Lex(); // Eat the '-'. 3887 } else if (Parser.getTok().is(AsmToken::Plus)) { 3888 // Nothing to do. 3889 Parser.Lex(); // Eat the '+'. 3890 } 3891 3892 E = Parser.getTok().getLoc(); 3893 int OffsetRegNum = tryParseRegister(); 3894 if (OffsetRegNum == -1) 3895 return Error(E, "register expected"); 3896 3897 // If there's a shift operator, handle it. 3898 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3899 unsigned ShiftImm = 0; 3900 if (Parser.getTok().is(AsmToken::Comma)) { 3901 Parser.Lex(); // Eat the ','. 
3902 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3903 return true; 3904 } 3905 3906 // Now we should have the closing ']' 3907 E = Parser.getTok().getLoc(); 3908 if (Parser.getTok().isNot(AsmToken::RBrac)) 3909 return Error(E, "']' expected"); 3910 Parser.Lex(); // Eat right bracket token. 3911 3912 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3913 ShiftType, ShiftImm, 0, isNegative, 3914 S, E)); 3915 3916 // If there's a pre-indexing writeback marker, '!', just add it as a token 3917 // operand. 3918 if (Parser.getTok().is(AsmToken::Exclaim)) { 3919 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3920 Parser.Lex(); // Eat the '!'. 3921 } 3922 3923 return false; 3924} 3925 3926/// parseMemRegOffsetShift - one of these two: 3927/// ( lsl | lsr | asr | ror ) , # shift_amount 3928/// rrx 3929/// return true if it parses a shift otherwise it returns false. 3930bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3931 unsigned &Amount) { 3932 SMLoc Loc = Parser.getTok().getLoc(); 3933 const AsmToken &Tok = Parser.getTok(); 3934 if (Tok.isNot(AsmToken::Identifier)) 3935 return true; 3936 StringRef ShiftName = Tok.getString(); 3937 if (ShiftName == "lsl" || ShiftName == "LSL" || 3938 ShiftName == "asl" || ShiftName == "ASL") 3939 St = ARM_AM::lsl; 3940 else if (ShiftName == "lsr" || ShiftName == "LSR") 3941 St = ARM_AM::lsr; 3942 else if (ShiftName == "asr" || ShiftName == "ASR") 3943 St = ARM_AM::asr; 3944 else if (ShiftName == "ror" || ShiftName == "ROR") 3945 St = ARM_AM::ror; 3946 else if (ShiftName == "rrx" || ShiftName == "RRX") 3947 St = ARM_AM::rrx; 3948 else 3949 return Error(Loc, "illegal shift operator"); 3950 Parser.Lex(); // Eat shift type token. 3951 3952 // rrx stands alone. 3953 Amount = 0; 3954 if (St != ARM_AM::rrx) { 3955 Loc = Parser.getTok().getLoc(); 3956 // A '#' and a shift amount. 
3957 const AsmToken &HashTok = Parser.getTok(); 3958 if (HashTok.isNot(AsmToken::Hash)) 3959 return Error(HashTok.getLoc(), "'#' expected"); 3960 Parser.Lex(); // Eat hash token. 3961 3962 const MCExpr *Expr; 3963 if (getParser().ParseExpression(Expr)) 3964 return true; 3965 // Range check the immediate. 3966 // lsl, ror: 0 <= imm <= 31 3967 // lsr, asr: 0 <= imm <= 32 3968 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3969 if (!CE) 3970 return Error(Loc, "shift amount must be an immediate"); 3971 int64_t Imm = CE->getValue(); 3972 if (Imm < 0 || 3973 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3974 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3975 return Error(Loc, "immediate shift value out of range"); 3976 Amount = Imm; 3977 } 3978 3979 return false; 3980} 3981 3982/// parseFPImm - A floating point immediate expression operand. 3983ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3984parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3985 SMLoc S = Parser.getTok().getLoc(); 3986 3987 if (Parser.getTok().isNot(AsmToken::Hash)) 3988 return MatchOperand_NoMatch; 3989 3990 // Disambiguate the VMOV forms that can accept an FP immediate. 3991 // vmov.f32 <sreg>, #imm 3992 // vmov.f64 <dreg>, #imm 3993 // vmov.f32 <dreg>, #imm @ vector f32x2 3994 // vmov.f32 <qreg>, #imm @ vector f32x4 3995 // 3996 // There are also the NEON VMOV instructions which expect an 3997 // integer constant. Make sure we don't try to parse an FPImm 3998 // for these: 3999 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 4000 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 4001 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 4002 TyOp->getToken() != ".f64")) 4003 return MatchOperand_NoMatch; 4004 4005 Parser.Lex(); // Eat the '#'. 4006 4007 // Handle negation, as that still comes through as a separate token. 
4008 bool isNegative = false; 4009 if (Parser.getTok().is(AsmToken::Minus)) { 4010 isNegative = true; 4011 Parser.Lex(); 4012 } 4013 const AsmToken &Tok = Parser.getTok(); 4014 if (Tok.is(AsmToken::Real)) { 4015 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 4016 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 4017 // If we had a '-' in front, toggle the sign bit. 4018 IntVal ^= (uint64_t)isNegative << 63; 4019 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 4020 Parser.Lex(); // Eat the token. 4021 if (Val == -1) { 4022 TokError("floating point value out of range"); 4023 return MatchOperand_ParseFail; 4024 } 4025 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 4026 return MatchOperand_Success; 4027 } 4028 if (Tok.is(AsmToken::Integer)) { 4029 int64_t Val = Tok.getIntVal(); 4030 Parser.Lex(); // Eat the token. 4031 if (Val > 255 || Val < 0) { 4032 TokError("encoded floating point value out of range"); 4033 return MatchOperand_ParseFail; 4034 } 4035 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 4036 return MatchOperand_Success; 4037 } 4038 4039 TokError("invalid floating point immediate"); 4040 return MatchOperand_ParseFail; 4041} 4042/// Parse a arm instruction operand. For now this parses the operand regardless 4043/// of the mnemonic. 4044bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4045 StringRef Mnemonic) { 4046 SMLoc S, E; 4047 4048 // Check if the current operand has a custom associated parser, if so, try to 4049 // custom parse the operand, or fallback to the general approach. 4050 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 4051 if (ResTy == MatchOperand_Success) 4052 return false; 4053 // If there wasn't a custom match, try the generic matcher below. Otherwise, 4054 // there was a match, but an error occurred, in which case, just return that 4055 // the operand parsing failed. 
4056 if (ResTy == MatchOperand_ParseFail) 4057 return true; 4058 4059 switch (getLexer().getKind()) { 4060 default: 4061 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 4062 return true; 4063 case AsmToken::Identifier: { 4064 // If this is VMRS, check for the apsr_nzcv operand. 4065 if (!tryParseRegisterWithWriteBack(Operands)) 4066 return false; 4067 int Res = tryParseShiftRegister(Operands); 4068 if (Res == 0) // success 4069 return false; 4070 else if (Res == -1) // irrecoverable error 4071 return true; 4072 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 4073 S = Parser.getTok().getLoc(); 4074 Parser.Lex(); 4075 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 4076 return false; 4077 } 4078 4079 // Fall though for the Identifier case that is not a register or a 4080 // special name. 4081 } 4082 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) 4083 case AsmToken::Integer: // things like 1f and 2b as a branch targets 4084 case AsmToken::String: // quoted label names. 4085 case AsmToken::Dot: { // . as a branch target 4086 // This was not a register so parse other operands that start with an 4087 // identifier (like labels) as expressions and create them as immediates. 4088 const MCExpr *IdVal; 4089 S = Parser.getTok().getLoc(); 4090 if (getParser().ParseExpression(IdVal)) 4091 return true; 4092 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4093 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 4094 return false; 4095 } 4096 case AsmToken::LBrac: 4097 return parseMemory(Operands); 4098 case AsmToken::LCurly: 4099 return parseRegisterList(Operands); 4100 case AsmToken::Hash: { 4101 // #42 -> immediate. 
4102 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 4103 S = Parser.getTok().getLoc(); 4104 Parser.Lex(); 4105 bool isNegative = Parser.getTok().is(AsmToken::Minus); 4106 const MCExpr *ImmVal; 4107 if (getParser().ParseExpression(ImmVal)) 4108 return true; 4109 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 4110 if (CE) { 4111 int32_t Val = CE->getValue(); 4112 if (isNegative && Val == 0) 4113 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 4114 } 4115 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4116 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 4117 return false; 4118 } 4119 case AsmToken::Colon: { 4120 // ":lower16:" and ":upper16:" expression prefixes 4121 // FIXME: Check it's an expression prefix, 4122 // e.g. (FOO - :lower16:BAR) isn't legal. 4123 ARMMCExpr::VariantKind RefKind; 4124 if (parsePrefix(RefKind)) 4125 return true; 4126 4127 const MCExpr *SubExprVal; 4128 if (getParser().ParseExpression(SubExprVal)) 4129 return true; 4130 4131 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 4132 getContext()); 4133 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4134 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 4135 return false; 4136 } 4137 } 4138} 4139 4140// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 4141// :lower16: and :upper16:. 
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex(); // Eat the identifier.

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  // These mnemonics end in letter pairs that look like a predication code
  // ("eq", "ls", "al", ...) but are part of the instruction name itself.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
4272void ARMAsmParser:: 4273getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4274 bool &CanAcceptPredicationCode) { 4275 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4276 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4277 Mnemonic == "add" || Mnemonic == "adc" || 4278 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4279 Mnemonic == "orr" || Mnemonic == "mvn" || 4280 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4281 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4282 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4283 Mnemonic == "mla" || Mnemonic == "smlal" || 4284 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4285 CanAcceptCarrySet = true; 4286 } else 4287 CanAcceptCarrySet = false; 4288 4289 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4290 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4291 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4292 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4293 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4294 (Mnemonic == "clrex" && !isThumb()) || 4295 (Mnemonic == "nop" && isThumbOne()) || 4296 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4297 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4298 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4299 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4300 !isThumb()) || 4301 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4302 CanAcceptPredicationCode = false; 4303 } else 4304 CanAcceptPredicationCode = true; 4305 4306 if (isThumb()) { 4307 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4308 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4309 CanAcceptPredicationCode = false; 4310 } 4311} 4312 4313bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4314 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4315 // FIXME: This is all horribly hacky. We really need a better way to deal 4316 // with optional operands like this in the matcher table. 4317 4318 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4319 // another does not. Specifically, the MOVW instruction does not. So we 4320 // special case it here and remove the defaulted (non-setting) cc_out 4321 // operand if that's the instruction we're trying to match. 4322 // 4323 // We do this as post-processing of the explicit operands rather than just 4324 // conditionally adding the cc_out in the first place because we need 4325 // to check the type of the parsed immediate operand. 4326 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4327 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4328 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4329 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4330 return true; 4331 4332 // Register-register 'add' for thumb does not have a cc_out operand 4333 // when there are only two register operands. 4334 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4335 static_cast<ARMOperand*>(Operands[3])->isReg() && 4336 static_cast<ARMOperand*>(Operands[4])->isReg() && 4337 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4338 return true; 4339 // Register-register 'add' for thumb does not have a cc_out operand 4340 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4341 // have to check the immediate range here since Thumb2 has a variant 4342 // that can handle a different range and has a cc_out operand. 
4343 if (((isThumb() && Mnemonic == "add") || 4344 (isThumbTwo() && Mnemonic == "sub")) && 4345 Operands.size() == 6 && 4346 static_cast<ARMOperand*>(Operands[3])->isReg() && 4347 static_cast<ARMOperand*>(Operands[4])->isReg() && 4348 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4349 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4350 (static_cast<ARMOperand*>(Operands[5])->isReg() || 4351 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4352 return true; 4353 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4354 // imm0_4095 variant. That's the least-preferred variant when 4355 // selecting via the generic "add" mnemonic, so to know that we 4356 // should remove the cc_out operand, we have to explicitly check that 4357 // it's not one of the other variants. Ugh. 4358 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4359 Operands.size() == 6 && 4360 static_cast<ARMOperand*>(Operands[3])->isReg() && 4361 static_cast<ARMOperand*>(Operands[4])->isReg() && 4362 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4363 // Nest conditions rather than one big 'if' statement for readability. 4364 // 4365 // If either register is a high reg, it's either one of the SP 4366 // variants (handled above) or a 32-bit encoding, so we just 4367 // check against T3. 4368 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4369 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4370 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4371 return false; 4372 // If both registers are low, we're in an IT block, and the immediate is 4373 // in range, we should use encoding T1 instead, which has a cc_out. 
4374 if (inITBlock() && 4375 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4376 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4377 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4378 return false; 4379 4380 // Otherwise, we use encoding T4, which does not have a cc_out 4381 // operand. 4382 return true; 4383 } 4384 4385 // The thumb2 multiply instruction doesn't have a CCOut register, so 4386 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4387 // use the 16-bit encoding or not. 4388 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4389 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4390 static_cast<ARMOperand*>(Operands[3])->isReg() && 4391 static_cast<ARMOperand*>(Operands[4])->isReg() && 4392 static_cast<ARMOperand*>(Operands[5])->isReg() && 4393 // If the registers aren't low regs, the destination reg isn't the 4394 // same as one of the source regs, or the cc_out operand is zero 4395 // outside of an IT block, we have to use the 32-bit encoding, so 4396 // remove the cc_out operand. 4397 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4398 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4399 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) || 4400 !inITBlock() || 4401 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4402 static_cast<ARMOperand*>(Operands[5])->getReg() && 4403 static_cast<ARMOperand*>(Operands[3])->getReg() != 4404 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4405 return true; 4406 4407 // Also check the 'mul' syntax variant that doesn't specify an explicit 4408 // destination register. 
4409 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && 4410 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4411 static_cast<ARMOperand*>(Operands[3])->isReg() && 4412 static_cast<ARMOperand*>(Operands[4])->isReg() && 4413 // If the registers aren't low regs or the cc_out operand is zero 4414 // outside of an IT block, we have to use the 32-bit encoding, so 4415 // remove the cc_out operand. 4416 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4417 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4418 !inITBlock())) 4419 return true; 4420 4421 4422 4423 // Register-register 'add/sub' for thumb does not have a cc_out operand 4424 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4425 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4426 // right, this will result in better diagnostics (which operand is off) 4427 // anyway. 4428 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4429 (Operands.size() == 5 || Operands.size() == 6) && 4430 static_cast<ARMOperand*>(Operands[3])->isReg() && 4431 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4432 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4433 return true; 4434 4435 return false; 4436} 4437 4438static bool isDataTypeToken(StringRef Tok) { 4439 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || 4440 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || 4441 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || 4442 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || 4443 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || 4444 Tok == ".f" || Tok == ".d"; 4445} 4446 4447// FIXME: This bit should probably be handled via an explicit match class 4448// in the .td files that matches the suffix instead of having it be 4449// a literal string token the way it is now. 
// Returns true when the given mnemonic simply ignores an (otherwise valid)
// NEON datatype suffix: vldm/vstm accept, and discard, any such suffix.
// The DT argument is currently unused.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

/// Parse an arm instruction mnemonic followed by its operands.
///
/// On success, Operands holds: [0] the bare mnemonic token, then (depending
/// on the mnemonic) an optional cc_out operand, an optional condition-code
/// operand, any extra '.'-separated mnemonic suffix tokens, and finally the
/// parsed assembly operands. Returns true (after reporting an error and
/// discarding the rest of the statement) on failure.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // The mask suffix starts two characters in, right after "it" itself.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    unsigned Mask = 8;
    // Walk the mask string back-to-front, shifting in one bit per 't'/'e'
    // position; the initial 8 ends up as the terminating 1-bit the IT
    // encoding requires.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition-code suffix sits right after the mnemonic and any 's'
    // (CarrySetting contributes 1 to the offset when set).
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // ".n" (narrow) suffixes are dropped here; everything else becomes a
    // token operand for the matcher.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' needs special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  // Scan the register-list operands starting at OpNo. Returns true if any
  // listed register is neither a low register (r0-r7) nor the single
  // permitted high register HiReg (HiReg == 0 means no high register is
  // allowed). containsReg reports whether Reg appeared in the list.
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for an opcode directly in the target's
// instruction-description table.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// FIXME: We would really like to be able to tablegen'erate this.
//
// Enforce operand constraints the table-driven matcher cannot express
// (IT-block predication rules, paired-register sequencing, register-list
// contents, bitfield ranges). Returns true, after emitting a diagnostic,
// when the instruction is invalid in its context.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    // The first instruction's condition is given by the IT instruction
    // itself; subsequent ones read their t/e bit out of the stored mask.
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1 (operand 0 is the writeback/status result here, so
    // the register pair starts at operand 1).
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}
4803 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4804 return Error(Operands[2]->getStartLoc(), 4805 "writeback operator '!' expected"); 4806 // If we should not have writeback, there must not be a '!'. This is 4807 // true even for the 32-bit wide encodings. 4808 if (listContainsBase && hasWritebackToken) 4809 return Error(Operands[3]->getStartLoc(), 4810 "writeback operator '!' not allowed when base register " 4811 "in register list"); 4812 4813 break; 4814 } 4815 case ARM::t2LDMIA_UPD: { 4816 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4817 return Error(Operands[4]->getStartLoc(), 4818 "writeback operator '!' not allowed when base register " 4819 "in register list"); 4820 break; 4821 } 4822 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 4823 // so only issue a diagnostic for thumb1. The instructions will be 4824 // switched to the t2 encodings in processInstruction() if necessary. 4825 case ARM::tPOP: { 4826 bool listContainsBase; 4827 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) && 4828 !isThumbTwo()) 4829 return Error(Operands[2]->getStartLoc(), 4830 "registers must be in range r0-r7 or pc"); 4831 break; 4832 } 4833 case ARM::tPUSH: { 4834 bool listContainsBase; 4835 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) && 4836 !isThumbTwo()) 4837 return Error(Operands[2]->getStartLoc(), 4838 "registers must be in range r0-r7 or lr"); 4839 break; 4840 } 4841 case ARM::tSTMIA_UPD: { 4842 bool listContainsBase; 4843 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4844 return Error(Operands[4]->getStartLoc(), 4845 "registers must be in range r0-r7"); 4846 break; 4847 } 4848 } 4849 4850 return false; 4851} 4852 4853static unsigned getRealVSTLNOpcode(unsigned Opc) { 4854 switch(Opc) { 4855 default: assert(0 && "unexpected opcode!"); 4856 case ARM::VST1LNdWB_fixed_Asm_8: return ARM::VST1LNd8_UPD; 4857 case ARM::VST1LNdWB_fixed_Asm_P8: return 
ARM::VST1LNd8_UPD; 4858 case ARM::VST1LNdWB_fixed_Asm_I8: return ARM::VST1LNd8_UPD; 4859 case ARM::VST1LNdWB_fixed_Asm_S8: return ARM::VST1LNd8_UPD; 4860 case ARM::VST1LNdWB_fixed_Asm_U8: return ARM::VST1LNd8_UPD; 4861 case ARM::VST1LNdWB_fixed_Asm_16: return ARM::VST1LNd16_UPD; 4862 case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD; 4863 case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD; 4864 case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD; 4865 case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD; 4866 case ARM::VST1LNdWB_fixed_Asm_32: return ARM::VST1LNd32_UPD; 4867 case ARM::VST1LNdWB_fixed_Asm_F: return ARM::VST1LNd32_UPD; 4868 case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD; 4869 case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD; 4870 case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD; 4871 case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD; 4872 case ARM::VST1LNdWB_register_Asm_8: return ARM::VST1LNd8_UPD; 4873 case ARM::VST1LNdWB_register_Asm_P8: return ARM::VST1LNd8_UPD; 4874 case ARM::VST1LNdWB_register_Asm_I8: return ARM::VST1LNd8_UPD; 4875 case ARM::VST1LNdWB_register_Asm_S8: return ARM::VST1LNd8_UPD; 4876 case ARM::VST1LNdWB_register_Asm_U8: return ARM::VST1LNd8_UPD; 4877 case ARM::VST1LNdWB_register_Asm_16: return ARM::VST1LNd16_UPD; 4878 case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD; 4879 case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD; 4880 case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD; 4881 case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD; 4882 case ARM::VST1LNdWB_register_Asm_32: return ARM::VST1LNd32_UPD; 4883 case ARM::VST1LNdWB_register_Asm_F: return ARM::VST1LNd32_UPD; 4884 case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD; 4885 case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD; 4886 case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD; 4887 case 
ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD; 4888 case ARM::VST1LNdAsm_8: return ARM::VST1LNd8; 4889 case ARM::VST1LNdAsm_P8: return ARM::VST1LNd8; 4890 case ARM::VST1LNdAsm_I8: return ARM::VST1LNd8; 4891 case ARM::VST1LNdAsm_S8: return ARM::VST1LNd8; 4892 case ARM::VST1LNdAsm_U8: return ARM::VST1LNd8; 4893 case ARM::VST1LNdAsm_16: return ARM::VST1LNd16; 4894 case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16; 4895 case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16; 4896 case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16; 4897 case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16; 4898 case ARM::VST1LNdAsm_32: return ARM::VST1LNd32; 4899 case ARM::VST1LNdAsm_F: return ARM::VST1LNd32; 4900 case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32; 4901 case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32; 4902 case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32; 4903 case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32; 4904 } 4905} 4906 4907static unsigned getRealVLDLNOpcode(unsigned Opc) { 4908 switch(Opc) { 4909 default: assert(0 && "unexpected opcode!"); 4910 case ARM::VLD1LNdWB_fixed_Asm_8: return ARM::VLD1LNd8_UPD; 4911 case ARM::VLD1LNdWB_fixed_Asm_P8: return ARM::VLD1LNd8_UPD; 4912 case ARM::VLD1LNdWB_fixed_Asm_I8: return ARM::VLD1LNd8_UPD; 4913 case ARM::VLD1LNdWB_fixed_Asm_S8: return ARM::VLD1LNd8_UPD; 4914 case ARM::VLD1LNdWB_fixed_Asm_U8: return ARM::VLD1LNd8_UPD; 4915 case ARM::VLD1LNdWB_fixed_Asm_16: return ARM::VLD1LNd16_UPD; 4916 case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD; 4917 case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD; 4918 case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD; 4919 case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD; 4920 case ARM::VLD1LNdWB_fixed_Asm_32: return ARM::VLD1LNd32_UPD; 4921 case ARM::VLD1LNdWB_fixed_Asm_F: return ARM::VLD1LNd32_UPD; 4922 case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD; 4923 case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD; 4924 case 
ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD; 4925 case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD; 4926 case ARM::VLD1LNdWB_register_Asm_8: return ARM::VLD1LNd8_UPD; 4927 case ARM::VLD1LNdWB_register_Asm_P8: return ARM::VLD1LNd8_UPD; 4928 case ARM::VLD1LNdWB_register_Asm_I8: return ARM::VLD1LNd8_UPD; 4929 case ARM::VLD1LNdWB_register_Asm_S8: return ARM::VLD1LNd8_UPD; 4930 case ARM::VLD1LNdWB_register_Asm_U8: return ARM::VLD1LNd8_UPD; 4931 case ARM::VLD1LNdWB_register_Asm_16: return ARM::VLD1LNd16_UPD; 4932 case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD; 4933 case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD; 4934 case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD; 4935 case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD; 4936 case ARM::VLD1LNdWB_register_Asm_32: return ARM::VLD1LNd32_UPD; 4937 case ARM::VLD1LNdWB_register_Asm_F: return ARM::VLD1LNd32_UPD; 4938 case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD; 4939 case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD; 4940 case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD; 4941 case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD; 4942 case ARM::VLD1LNdAsm_8: return ARM::VLD1LNd8; 4943 case ARM::VLD1LNdAsm_P8: return ARM::VLD1LNd8; 4944 case ARM::VLD1LNdAsm_I8: return ARM::VLD1LNd8; 4945 case ARM::VLD1LNdAsm_S8: return ARM::VLD1LNd8; 4946 case ARM::VLD1LNdAsm_U8: return ARM::VLD1LNd8; 4947 case ARM::VLD1LNdAsm_16: return ARM::VLD1LNd16; 4948 case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16; 4949 case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16; 4950 case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16; 4951 case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16; 4952 case ARM::VLD1LNdAsm_32: return ARM::VLD1LNd32; 4953 case ARM::VLD1LNdAsm_F: return ARM::VLD1LNd32; 4954 case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32; 4955 case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32; 4956 case 
ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32; 4957 case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32; 4958 } 4959} 4960 4961bool ARMAsmParser:: 4962processInstruction(MCInst &Inst, 4963 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4964 switch (Inst.getOpcode()) { 4965 // Handle NEON VST1 complex aliases. 4966 case ARM::VST1LNdWB_register_Asm_8: 4967 case ARM::VST1LNdWB_register_Asm_P8: 4968 case ARM::VST1LNdWB_register_Asm_I8: 4969 case ARM::VST1LNdWB_register_Asm_S8: 4970 case ARM::VST1LNdWB_register_Asm_U8: 4971 case ARM::VST1LNdWB_register_Asm_16: 4972 case ARM::VST1LNdWB_register_Asm_P16: 4973 case ARM::VST1LNdWB_register_Asm_I16: 4974 case ARM::VST1LNdWB_register_Asm_S16: 4975 case ARM::VST1LNdWB_register_Asm_U16: 4976 case ARM::VST1LNdWB_register_Asm_32: 4977 case ARM::VST1LNdWB_register_Asm_F: 4978 case ARM::VST1LNdWB_register_Asm_F32: 4979 case ARM::VST1LNdWB_register_Asm_I32: 4980 case ARM::VST1LNdWB_register_Asm_S32: 4981 case ARM::VST1LNdWB_register_Asm_U32: { 4982 MCInst TmpInst; 4983 // Shuffle the operands around so the lane index operand is in the 4984 // right place. 
4985 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 4986 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 4987 TmpInst.addOperand(Inst.getOperand(2)); // Rn 4988 TmpInst.addOperand(Inst.getOperand(3)); // alignment 4989 TmpInst.addOperand(Inst.getOperand(4)); // Rm 4990 TmpInst.addOperand(Inst.getOperand(0)); // Vd 4991 TmpInst.addOperand(Inst.getOperand(1)); // lane 4992 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 4993 TmpInst.addOperand(Inst.getOperand(6)); 4994 Inst = TmpInst; 4995 return true; 4996 } 4997 case ARM::VST1LNdWB_fixed_Asm_8: 4998 case ARM::VST1LNdWB_fixed_Asm_P8: 4999 case ARM::VST1LNdWB_fixed_Asm_I8: 5000 case ARM::VST1LNdWB_fixed_Asm_S8: 5001 case ARM::VST1LNdWB_fixed_Asm_U8: 5002 case ARM::VST1LNdWB_fixed_Asm_16: 5003 case ARM::VST1LNdWB_fixed_Asm_P16: 5004 case ARM::VST1LNdWB_fixed_Asm_I16: 5005 case ARM::VST1LNdWB_fixed_Asm_S16: 5006 case ARM::VST1LNdWB_fixed_Asm_U16: 5007 case ARM::VST1LNdWB_fixed_Asm_32: 5008 case ARM::VST1LNdWB_fixed_Asm_F: 5009 case ARM::VST1LNdWB_fixed_Asm_F32: 5010 case ARM::VST1LNdWB_fixed_Asm_I32: 5011 case ARM::VST1LNdWB_fixed_Asm_S32: 5012 case ARM::VST1LNdWB_fixed_Asm_U32: { 5013 MCInst TmpInst; 5014 // Shuffle the operands around so the lane index operand is in the 5015 // right place. 
5016 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 5017 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5018 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5019 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5020 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5021 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5022 TmpInst.addOperand(Inst.getOperand(1)); // lane 5023 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5024 TmpInst.addOperand(Inst.getOperand(5)); 5025 Inst = TmpInst; 5026 return true; 5027 } 5028 case ARM::VST1LNdAsm_8: 5029 case ARM::VST1LNdAsm_P8: 5030 case ARM::VST1LNdAsm_I8: 5031 case ARM::VST1LNdAsm_S8: 5032 case ARM::VST1LNdAsm_U8: 5033 case ARM::VST1LNdAsm_16: 5034 case ARM::VST1LNdAsm_P16: 5035 case ARM::VST1LNdAsm_I16: 5036 case ARM::VST1LNdAsm_S16: 5037 case ARM::VST1LNdAsm_U16: 5038 case ARM::VST1LNdAsm_32: 5039 case ARM::VST1LNdAsm_F: 5040 case ARM::VST1LNdAsm_F32: 5041 case ARM::VST1LNdAsm_I32: 5042 case ARM::VST1LNdAsm_S32: 5043 case ARM::VST1LNdAsm_U32: { 5044 MCInst TmpInst; 5045 // Shuffle the operands around so the lane index operand is in the 5046 // right place. 5047 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 5048 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5049 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5050 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5051 TmpInst.addOperand(Inst.getOperand(1)); // lane 5052 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5053 TmpInst.addOperand(Inst.getOperand(5)); 5054 Inst = TmpInst; 5055 return true; 5056 } 5057 // Handle NEON VLD1 complex aliases. 
5058 case ARM::VLD1LNdWB_register_Asm_8: 5059 case ARM::VLD1LNdWB_register_Asm_P8: 5060 case ARM::VLD1LNdWB_register_Asm_I8: 5061 case ARM::VLD1LNdWB_register_Asm_S8: 5062 case ARM::VLD1LNdWB_register_Asm_U8: 5063 case ARM::VLD1LNdWB_register_Asm_16: 5064 case ARM::VLD1LNdWB_register_Asm_P16: 5065 case ARM::VLD1LNdWB_register_Asm_I16: 5066 case ARM::VLD1LNdWB_register_Asm_S16: 5067 case ARM::VLD1LNdWB_register_Asm_U16: 5068 case ARM::VLD1LNdWB_register_Asm_32: 5069 case ARM::VLD1LNdWB_register_Asm_F: 5070 case ARM::VLD1LNdWB_register_Asm_F32: 5071 case ARM::VLD1LNdWB_register_Asm_I32: 5072 case ARM::VLD1LNdWB_register_Asm_S32: 5073 case ARM::VLD1LNdWB_register_Asm_U32: { 5074 MCInst TmpInst; 5075 // Shuffle the operands around so the lane index operand is in the 5076 // right place. 5077 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5078 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5079 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5080 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5081 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5082 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5083 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5084 TmpInst.addOperand(Inst.getOperand(1)); // lane 5085 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5086 TmpInst.addOperand(Inst.getOperand(6)); 5087 Inst = TmpInst; 5088 return true; 5089 } 5090 case ARM::VLD1LNdWB_fixed_Asm_8: 5091 case ARM::VLD1LNdWB_fixed_Asm_P8: 5092 case ARM::VLD1LNdWB_fixed_Asm_I8: 5093 case ARM::VLD1LNdWB_fixed_Asm_S8: 5094 case ARM::VLD1LNdWB_fixed_Asm_U8: 5095 case ARM::VLD1LNdWB_fixed_Asm_16: 5096 case ARM::VLD1LNdWB_fixed_Asm_P16: 5097 case ARM::VLD1LNdWB_fixed_Asm_I16: 5098 case ARM::VLD1LNdWB_fixed_Asm_S16: 5099 case ARM::VLD1LNdWB_fixed_Asm_U16: 5100 case ARM::VLD1LNdWB_fixed_Asm_32: 5101 case ARM::VLD1LNdWB_fixed_Asm_F: 5102 case ARM::VLD1LNdWB_fixed_Asm_F32: 5103 case ARM::VLD1LNdWB_fixed_Asm_I32: 5104 case ARM::VLD1LNdWB_fixed_Asm_S32: 5105 case 
ARM::VLD1LNdWB_fixed_Asm_U32: { 5106 MCInst TmpInst; 5107 // Shuffle the operands around so the lane index operand is in the 5108 // right place. 5109 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5110 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5111 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5112 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5113 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5114 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5115 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5116 TmpInst.addOperand(Inst.getOperand(1)); // lane 5117 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5118 TmpInst.addOperand(Inst.getOperand(5)); 5119 Inst = TmpInst; 5120 return true; 5121 } 5122 case ARM::VLD1LNdAsm_8: 5123 case ARM::VLD1LNdAsm_P8: 5124 case ARM::VLD1LNdAsm_I8: 5125 case ARM::VLD1LNdAsm_S8: 5126 case ARM::VLD1LNdAsm_U8: 5127 case ARM::VLD1LNdAsm_16: 5128 case ARM::VLD1LNdAsm_P16: 5129 case ARM::VLD1LNdAsm_I16: 5130 case ARM::VLD1LNdAsm_S16: 5131 case ARM::VLD1LNdAsm_U16: 5132 case ARM::VLD1LNdAsm_32: 5133 case ARM::VLD1LNdAsm_F: 5134 case ARM::VLD1LNdAsm_F32: 5135 case ARM::VLD1LNdAsm_I32: 5136 case ARM::VLD1LNdAsm_S32: 5137 case ARM::VLD1LNdAsm_U32: { 5138 MCInst TmpInst; 5139 // Shuffle the operands around so the lane index operand is in the 5140 // right place. 5141 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5142 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5143 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5144 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5145 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5146 TmpInst.addOperand(Inst.getOperand(1)); // lane 5147 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5148 TmpInst.addOperand(Inst.getOperand(5)); 5149 Inst = TmpInst; 5150 return true; 5151 } 5152 // Handle the MOV complex aliases. 
5153 case ARM::ASRr: 5154 case ARM::LSRr: 5155 case ARM::LSLr: 5156 case ARM::RORr: { 5157 ARM_AM::ShiftOpc ShiftTy; 5158 switch(Inst.getOpcode()) { 5159 default: llvm_unreachable("unexpected opcode!"); 5160 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 5161 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 5162 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 5163 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 5164 } 5165 // A shift by zero is a plain MOVr, not a MOVsi. 5166 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 5167 MCInst TmpInst; 5168 TmpInst.setOpcode(ARM::MOVsr); 5169 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5170 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5171 TmpInst.addOperand(Inst.getOperand(2)); // Rm 5172 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5173 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 5174 TmpInst.addOperand(Inst.getOperand(4)); 5175 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 5176 Inst = TmpInst; 5177 return true; 5178 } 5179 case ARM::ASRi: 5180 case ARM::LSRi: 5181 case ARM::LSLi: 5182 case ARM::RORi: { 5183 ARM_AM::ShiftOpc ShiftTy; 5184 switch(Inst.getOpcode()) { 5185 default: llvm_unreachable("unexpected opcode!"); 5186 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 5187 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 5188 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 5189 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 5190 } 5191 // A shift by zero is a plain MOVr, not a MOVsi. 5192 unsigned Amt = Inst.getOperand(2).getImm(); 5193 unsigned Opc = Amt == 0 ? 
ARM::MOVr : ARM::MOVsi; 5194 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 5195 MCInst TmpInst; 5196 TmpInst.setOpcode(Opc); 5197 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5198 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5199 if (Opc == ARM::MOVsi) 5200 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5201 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 5202 TmpInst.addOperand(Inst.getOperand(4)); 5203 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 5204 Inst = TmpInst; 5205 return true; 5206 } 5207 case ARM::RRXi: { 5208 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 5209 MCInst TmpInst; 5210 TmpInst.setOpcode(ARM::MOVsi); 5211 TmpInst.addOperand(Inst.getOperand(0)); // Rd 5212 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5213 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 5214 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5215 TmpInst.addOperand(Inst.getOperand(3)); 5216 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 5217 Inst = TmpInst; 5218 return true; 5219 } 5220 case ARM::t2LDMIA_UPD: { 5221 // If this is a load of a single register, then we should use 5222 // a post-indexed LDR instruction instead, per the ARM ARM. 5223 if (Inst.getNumOperands() != 5) 5224 return false; 5225 MCInst TmpInst; 5226 TmpInst.setOpcode(ARM::t2LDR_POST); 5227 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5228 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5229 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5230 TmpInst.addOperand(MCOperand::CreateImm(4)); 5231 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5232 TmpInst.addOperand(Inst.getOperand(3)); 5233 Inst = TmpInst; 5234 return true; 5235 } 5236 case ARM::t2STMDB_UPD: { 5237 // If this is a store of a single register, then we should use 5238 // a pre-indexed STR instruction instead, per the ARM ARM. 
5239 if (Inst.getNumOperands() != 5) 5240 return false; 5241 MCInst TmpInst; 5242 TmpInst.setOpcode(ARM::t2STR_PRE); 5243 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5244 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5245 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5246 TmpInst.addOperand(MCOperand::CreateImm(-4)); 5247 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5248 TmpInst.addOperand(Inst.getOperand(3)); 5249 Inst = TmpInst; 5250 return true; 5251 } 5252 case ARM::LDMIA_UPD: 5253 // If this is a load of a single register via a 'pop', then we should use 5254 // a post-indexed LDR instruction instead, per the ARM ARM. 5255 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 5256 Inst.getNumOperands() == 5) { 5257 MCInst TmpInst; 5258 TmpInst.setOpcode(ARM::LDR_POST_IMM); 5259 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5260 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5261 TmpInst.addOperand(Inst.getOperand(1)); // Rn 5262 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 5263 TmpInst.addOperand(MCOperand::CreateImm(4)); 5264 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5265 TmpInst.addOperand(Inst.getOperand(3)); 5266 Inst = TmpInst; 5267 return true; 5268 } 5269 break; 5270 case ARM::STMDB_UPD: 5271 // If this is a store of a single register via a 'push', then we should use 5272 // a pre-indexed STR instruction instead, per the ARM ARM. 
5273 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 5274 Inst.getNumOperands() == 5) { 5275 MCInst TmpInst; 5276 TmpInst.setOpcode(ARM::STR_PRE_IMM); 5277 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 5278 TmpInst.addOperand(Inst.getOperand(4)); // Rt 5279 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 5280 TmpInst.addOperand(MCOperand::CreateImm(-4)); 5281 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 5282 TmpInst.addOperand(Inst.getOperand(3)); 5283 Inst = TmpInst; 5284 } 5285 break; 5286 case ARM::t2ADDri12: 5287 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" 5288 // mnemonic was used (not "addw"), encoding T3 is preferred. 5289 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" || 5290 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 5291 break; 5292 Inst.setOpcode(ARM::t2ADDri); 5293 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 5294 break; 5295 case ARM::t2SUBri12: 5296 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" 5297 // mnemonic was used (not "subw"), encoding T3 is preferred. 5298 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" || 5299 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 5300 break; 5301 Inst.setOpcode(ARM::t2SUBri); 5302 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 5303 break; 5304 case ARM::tADDi8: 5305 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 5306 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 5307 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 5308 // to encoding T1 if <Rd> is omitted." 5309 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 5310 Inst.setOpcode(ARM::tADDi3); 5311 return true; 5312 } 5313 break; 5314 case ARM::tSUBi8: 5315 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 5316 // explicitly specified. 
From the ARM ARM: "Encoding T1 is preferred 5317 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 5318 // to encoding T1 if <Rd> is omitted." 5319 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 5320 Inst.setOpcode(ARM::tSUBi3); 5321 return true; 5322 } 5323 break; 5324 case ARM::t2ADDrr: { 5325 // If the destination and first source operand are the same, and 5326 // there's no setting of the flags, use encoding T2 instead of T3. 5327 // Note that this is only for ADD, not SUB. This mirrors the system 5328 // 'as' behaviour. Make sure the wide encoding wasn't explicit. 5329 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 5330 Inst.getOperand(5).getReg() != 0 || 5331 (static_cast<ARMOperand*>(Operands[3])->isToken() && 5332 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) 5333 break; 5334 MCInst TmpInst; 5335 TmpInst.setOpcode(ARM::tADDhirr); 5336 TmpInst.addOperand(Inst.getOperand(0)); 5337 TmpInst.addOperand(Inst.getOperand(0)); 5338 TmpInst.addOperand(Inst.getOperand(2)); 5339 TmpInst.addOperand(Inst.getOperand(3)); 5340 TmpInst.addOperand(Inst.getOperand(4)); 5341 Inst = TmpInst; 5342 return true; 5343 } 5344 case ARM::tB: 5345 // A Thumb conditional branch outside of an IT block is a tBcc. 5346 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 5347 Inst.setOpcode(ARM::tBcc); 5348 return true; 5349 } 5350 break; 5351 case ARM::t2B: 5352 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 5353 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 5354 Inst.setOpcode(ARM::t2Bcc); 5355 return true; 5356 } 5357 break; 5358 case ARM::t2Bcc: 5359 // If the conditional is AL or we're in an IT block, we really want t2B. 5360 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 5361 Inst.setOpcode(ARM::t2B); 5362 return true; 5363 } 5364 break; 5365 case ARM::tBcc: 5366 // If the conditional is AL, we really want tB. 
5367 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 5368 Inst.setOpcode(ARM::tB); 5369 return true; 5370 } 5371 break; 5372 case ARM::tLDMIA: { 5373 // If the register list contains any high registers, or if the writeback 5374 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 5375 // instead if we're in Thumb2. Otherwise, this should have generated 5376 // an error in validateInstruction(). 5377 unsigned Rn = Inst.getOperand(0).getReg(); 5378 bool hasWritebackToken = 5379 (static_cast<ARMOperand*>(Operands[3])->isToken() && 5380 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 5381 bool listContainsBase; 5382 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 5383 (!listContainsBase && !hasWritebackToken) || 5384 (listContainsBase && hasWritebackToken)) { 5385 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 5386 assert (isThumbTwo()); 5387 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 5388 // If we're switching to the updating version, we need to insert 5389 // the writeback tied operand. 5390 if (hasWritebackToken) 5391 Inst.insert(Inst.begin(), 5392 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 5393 return true; 5394 } 5395 break; 5396 } 5397 case ARM::tSTMIA_UPD: { 5398 // If the register list contains any high registers, we need to use 5399 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 5400 // should have generated an error in validateInstruction(). 5401 unsigned Rn = Inst.getOperand(0).getReg(); 5402 bool listContainsBase; 5403 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 5404 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 5405 assert (isThumbTwo()); 5406 Inst.setOpcode(ARM::t2STMIA_UPD); 5407 return true; 5408 } 5409 break; 5410 } 5411 case ARM::tPOP: { 5412 bool listContainsBase; 5413 // If the register list contains any high registers, we need to use 5414 // the 32-bit encoding instead if we're in Thumb2. 
Otherwise, this 5415 // should have generated an error in validateInstruction(). 5416 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 5417 return false; 5418 assert (isThumbTwo()); 5419 Inst.setOpcode(ARM::t2LDMIA_UPD); 5420 // Add the base register and writeback operands. 5421 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5422 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5423 return true; 5424 } 5425 case ARM::tPUSH: { 5426 bool listContainsBase; 5427 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 5428 return false; 5429 assert (isThumbTwo()); 5430 Inst.setOpcode(ARM::t2STMDB_UPD); 5431 // Add the base register and writeback operands. 5432 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5433 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 5434 return true; 5435 } 5436 case ARM::t2MOVi: { 5437 // If we can use the 16-bit encoding and the user didn't explicitly 5438 // request the 32-bit variant, transform it here. 5439 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 5440 Inst.getOperand(1).getImm() <= 255 && 5441 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 5442 Inst.getOperand(4).getReg() == ARM::CPSR) || 5443 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 5444 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 5445 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 5446 // The operands aren't in the same order for tMOVi8... 5447 MCInst TmpInst; 5448 TmpInst.setOpcode(ARM::tMOVi8); 5449 TmpInst.addOperand(Inst.getOperand(0)); 5450 TmpInst.addOperand(Inst.getOperand(4)); 5451 TmpInst.addOperand(Inst.getOperand(1)); 5452 TmpInst.addOperand(Inst.getOperand(2)); 5453 TmpInst.addOperand(Inst.getOperand(3)); 5454 Inst = TmpInst; 5455 return true; 5456 } 5457 break; 5458 } 5459 case ARM::t2MOVr: { 5460 // If we can use the 16-bit encoding and the user didn't explicitly 5461 // request the 32-bit variant, transform it here. 
5462 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 5463 isARMLowRegister(Inst.getOperand(1).getReg()) && 5464 Inst.getOperand(2).getImm() == ARMCC::AL && 5465 Inst.getOperand(4).getReg() == ARM::CPSR && 5466 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 5467 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 5468 // The operands aren't the same for tMOV[S]r... (no cc_out) 5469 MCInst TmpInst; 5470 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 5471 TmpInst.addOperand(Inst.getOperand(0)); 5472 TmpInst.addOperand(Inst.getOperand(1)); 5473 TmpInst.addOperand(Inst.getOperand(2)); 5474 TmpInst.addOperand(Inst.getOperand(3)); 5475 Inst = TmpInst; 5476 return true; 5477 } 5478 break; 5479 } 5480 case ARM::t2SXTH: 5481 case ARM::t2SXTB: 5482 case ARM::t2UXTH: 5483 case ARM::t2UXTB: { 5484 // If we can use the 16-bit encoding and the user didn't explicitly 5485 // request the 32-bit variant, transform it here. 5486 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 5487 isARMLowRegister(Inst.getOperand(1).getReg()) && 5488 Inst.getOperand(2).getImm() == 0 && 5489 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 5490 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 5491 unsigned NewOpc; 5492 switch (Inst.getOpcode()) { 5493 default: llvm_unreachable("Illegal opcode!"); 5494 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 5495 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 5496 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 5497 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 5498 } 5499 // The operands aren't the same for thumb1 (no rotate operand). 
5500 MCInst TmpInst; 5501 TmpInst.setOpcode(NewOpc); 5502 TmpInst.addOperand(Inst.getOperand(0)); 5503 TmpInst.addOperand(Inst.getOperand(1)); 5504 TmpInst.addOperand(Inst.getOperand(3)); 5505 TmpInst.addOperand(Inst.getOperand(4)); 5506 Inst = TmpInst; 5507 return true; 5508 } 5509 break; 5510 } 5511 case ARM::t2IT: { 5512 // The mask bits for all but the first condition are represented as 5513 // the low bit of the condition code value implies 't'. We currently 5514 // always have 1 implies 't', so XOR toggle the bits if the low bit 5515 // of the condition code is zero. The encoding also expects the low 5516 // bit of the condition to be encoded as bit 4 of the mask operand, 5517 // so mask that in if needed 5518 MCOperand &MO = Inst.getOperand(1); 5519 unsigned Mask = MO.getImm(); 5520 unsigned OrigMask = Mask; 5521 unsigned TZ = CountTrailingZeros_32(Mask); 5522 if ((Inst.getOperand(0).getImm() & 1) == 0) { 5523 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 5524 for (unsigned i = 3; i != TZ; --i) 5525 Mask ^= 1 << i; 5526 } else 5527 Mask |= 0x10; 5528 MO.setImm(Mask); 5529 5530 // Set up the IT block state according to the IT instruction we just 5531 // matched. 5532 assert(!inITBlock() && "nested IT blocks?!"); 5533 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 5534 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 5535 ITState.CurPosition = 0; 5536 ITState.FirstCond = true; 5537 break; 5538 } 5539 } 5540 return false; 5541} 5542 5543unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 5544 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 5545 // suffix depending on whether they're in an IT block or not. 
5546 unsigned Opc = Inst.getOpcode(); 5547 const MCInstrDesc &MCID = getInstDesc(Opc); 5548 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 5549 assert(MCID.hasOptionalDef() && 5550 "optionally flag setting instruction missing optional def operand"); 5551 assert(MCID.NumOperands == Inst.getNumOperands() && 5552 "operand count mismatch!"); 5553 // Find the optional-def operand (cc_out). 5554 unsigned OpNo; 5555 for (OpNo = 0; 5556 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 5557 ++OpNo) 5558 ; 5559 // If we're parsing Thumb1, reject it completely. 5560 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 5561 return Match_MnemonicFail; 5562 // If we're parsing Thumb2, which form is legal depends on whether we're 5563 // in an IT block. 5564 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 5565 !inITBlock()) 5566 return Match_RequiresITBlock; 5567 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 5568 inITBlock()) 5569 return Match_RequiresNotITBlock; 5570 } 5571 // Some high-register supporting Thumb1 encodings only allow both registers 5572 // to be from r0-r7 when in Thumb2. 5573 else if (Opc == ARM::tADDhirr && isThumbOne() && 5574 isARMLowRegister(Inst.getOperand(1).getReg()) && 5575 isARMLowRegister(Inst.getOperand(2).getReg())) 5576 return Match_RequiresThumb2; 5577 // Others only require ARMv6 or later. 
5578 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 5579 isARMLowRegister(Inst.getOperand(0).getReg()) && 5580 isARMLowRegister(Inst.getOperand(1).getReg())) 5581 return Match_RequiresV6; 5582 return Match_Success; 5583} 5584 5585bool ARMAsmParser:: 5586MatchAndEmitInstruction(SMLoc IDLoc, 5587 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 5588 MCStreamer &Out) { 5589 MCInst Inst; 5590 unsigned ErrorInfo; 5591 unsigned MatchResult; 5592 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 5593 switch (MatchResult) { 5594 default: break; 5595 case Match_Success: 5596 // Context sensitive operand constraints aren't handled by the matcher, 5597 // so check them here. 5598 if (validateInstruction(Inst, Operands)) { 5599 // Still progress the IT block, otherwise one wrong condition causes 5600 // nasty cascading errors. 5601 forwardITPosition(); 5602 return true; 5603 } 5604 5605 // Some instructions need post-processing to, for example, tweak which 5606 // encoding is selected. Loop on it while changes happen so the 5607 // individual transformations can chain off each other. E.g., 5608 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 5609 while (processInstruction(Inst, Operands)) 5610 ; 5611 5612 // Only move forward at the very end so that everything in validate 5613 // and process gets a consistent answer about whether we're in an IT 5614 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ErrorInfo is the index of the offending operand, or ~0U when the
    // matcher couldn't attribute the failure to a specific operand.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      // Fall back to the instruction location if the operand has no
      // location of its own.
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    return parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  // Returning true lets the generic parser report an unknown directive.
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  // An empty operand list is legal and emits nothing.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // Switch the subtarget into Thumb mode and tell the streamer.
  if (!isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  return false;
}

/// parseDirectiveARM
///  ::= .arm
bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // Switch the subtarget into ARM mode and tell the streamer.
  if (isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has function name after .thumb_func directive
  // ELF doesn't
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L,
"unexpected token in .thumb_func directive"); 5735 Name = Tok.getIdentifier(); 5736 Parser.Lex(); // Consume the identifier token. 5737 } 5738 5739 if (getLexer().isNot(AsmToken::EndOfStatement)) 5740 return Error(L, "unexpected token in directive"); 5741 Parser.Lex(); 5742 5743 // FIXME: assuming function name will be the line following .thumb_func 5744 if (!isMachO) { 5745 Name = Parser.getTok().getIdentifier(); 5746 } 5747 5748 // Mark symbol as a thumb symbol. 5749 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 5750 getParser().getStreamer().EmitThumbFunc(Func); 5751 return false; 5752} 5753 5754/// parseDirectiveSyntax 5755/// ::= .syntax unified | divided 5756bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 5757 const AsmToken &Tok = Parser.getTok(); 5758 if (Tok.isNot(AsmToken::Identifier)) 5759 return Error(L, "unexpected token in .syntax directive"); 5760 StringRef Mode = Tok.getString(); 5761 if (Mode == "unified" || Mode == "UNIFIED") 5762 Parser.Lex(); 5763 else if (Mode == "divided" || Mode == "DIVIDED") 5764 return Error(L, "'.syntax divided' arm asssembly not supported"); 5765 else 5766 return Error(L, "unrecognized syntax mode in .syntax directive"); 5767 5768 if (getLexer().isNot(AsmToken::EndOfStatement)) 5769 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5770 Parser.Lex(); 5771 5772 // TODO tell the MC streamer the mode 5773 // getParser().getStreamer().Emit???(); 5774 return false; 5775} 5776 5777/// parseDirectiveCode 5778/// ::= .code 16 | 32 5779bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 5780 const AsmToken &Tok = Parser.getTok(); 5781 if (Tok.isNot(AsmToken::Integer)) 5782 return Error(L, "unexpected token in .code directive"); 5783 int64_t Val = Parser.getTok().getIntVal(); 5784 if (Val == 16) 5785 Parser.Lex(); 5786 else if (Val == 32) 5787 Parser.Lex(); 5788 else 5789 return Error(L, "invalid operand to .code directive"); 5790 5791 if (getLexer().isNot(AsmToken::EndOfStatement)) 5792 
return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5793 Parser.Lex(); 5794 5795 if (Val == 16) { 5796 if (!isThumb()) 5797 SwitchMode(); 5798 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5799 } else { 5800 if (isThumb()) 5801 SwitchMode(); 5802 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5803 } 5804 5805 return false; 5806} 5807 5808extern "C" void LLVMInitializeARMAsmLexer(); 5809 5810/// Force static initialization. 5811extern "C" void LLVMInitializeARMAsmParser() { 5812 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 5813 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 5814 LLVMInitializeARMAsmLexer(); 5815} 5816 5817#define GET_REGISTER_MATCHER 5818#define GET_MATCHER_IMPLEMENTATION 5819#include "ARMGenAsmMatcher.inc" 5820