ARMAsmParser.cpp revision cdcfa280568d5d48ebeba2dcfc87915105e090d1
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. Parses
/// instructions and directives into MCInst form via the MCTargetAsmParser
/// interface, tracking Thumb IT-block state across instructions.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // State of the currently-active Thumb IT (If-Then) block, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
                              // NOTE(review): forwardITPosition() runs the
                              // position up to 4 - trailingzeroes(Mask), so
                              // the [0,3] range above may be stale — verify
                              // against the IT-block initialization code.
  } ITState;

  // True while inside an active IT block (~0U sentinel means "none").
  bool inITBlock() { return ITState.CurPosition != ~0U;}

  // Advance to the next instruction slot in the IT block; clear the block
  // state (sentinel ~0U) once the last predicated instruction is consumed.
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics are forwarded to the generic parser, which owns the
  // source manager and location bookkeeping.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Low-level operand parsing helpers (defined later in this file).
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);

  // Assembler directive handlers.
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into the base mnemonic and its
  // predication/carry-set/IMod/IT-mask suffix components.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode and recompute the feature set the
  // matcher sees.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the auto-generated matcher.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  // PKH "lsl" shift amounts are 0-31; "asr" amounts are 1-32.
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                       const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                       const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                      const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                      const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match validation / canonicalization hooks.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match failure codes, returned from
  // checkTargetMatchPredicate() in addition to the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below; exactly one union member is live,
  // selected by Kind.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  // Source range of this operand, for diagnostics.
  SMLoc StartLoc, EndLoc;
  // Lives outside the union because it has a non-trivial type; used by the
  // three register-list kinds.
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;           // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;      // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;          // shift for OffsetReg.
      unsigned Alignment;         // 0 = no alignment specified
                                  // n = alignment in bytes (8, 16, or 32)
      unsigned isNegative : 1;    // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor: copies only the union member selected by Kind, since
  // the compiler cannot copy an unrestricted union member-wise.
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Kind-checked accessors. Each asserts that the requested union member is
  // the live one before reading it.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Simple kind predicates.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }

  // Immediate range predicates. Each accepts only a constant expression
  // whose value lies in the named range (the name encodes the range and,
  // where present, the required multiple: "s4" = multiple of 4).
  bool isImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  // True when the value is encodable as an ARM-mode modified immediate
  // (8-bit value rotated by an even amount).
  bool isARMSOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  // True when the value is encodable as a Thumb2 modified immediate.
  bool isT2SOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isSetEndImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }

  // Memory addressing-mode predicates. These classify a parsed k_Memory
  // operand against the constraints of a particular instruction encoding.
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (Kind != k_Immediate)
      return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // NOTE(review): OffsetRegNum is already known non-zero at this point, so
    // the !Memory.OffsetRegNum alternative below is redundant (harmless).
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 0;
  }
  bool isMemUImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // INT32_MIN encodes #-0 (see isAM3Offset above).
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
  // NEON vector-list predicates: a k_VectorList operand with the named
  // register count.
  bool isVecListOneD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 3;
  }

  // Vector lane-index predicates: the maximum lane depends on element size
  // (8 lanes of i8, 4 of i16, 2 of i32 per D register).
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }

  bool isNEONi8splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  bool isNEONi32splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }

  bool isNEONi32vmov() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }

  bool isNEONi64splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    uint64_t Value = CE->getValue();
    // i64 value with each byte being either 0 or 0xff.
    // NOTE(review): the loop tests the same low byte 8 times — Value is
    // never shifted, so only bits [7:0] are actually checked. Verify
    // whether a `Value >>= 8` per iteration was intended.
    for (unsigned i = 0; i < 8; ++i)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
    return true;
  }

  // Append Expr to Inst, folding constant expressions to plain immediates.
  // A null Expr is treated as immediate 0.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  // The add*Operands methods append this operand's value(s) to Inst; N is
  // the number of MCInst operands the matcher expects them to produce.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    // An unconditional (AL) predicate carries no CPSR use; otherwise the
    // second operand records the CPSR dependence.
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Emits source reg, shift reg, and the combined shift opcode/amount.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!"); 1072 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1073 Inst.addOperand(MCOperand::CreateImm( 1074 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1075 } 1076 1077 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1078 assert(N == 1 && "Invalid number of operands!"); 1079 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1080 ShifterImm.Imm)); 1081 } 1082 1083 void addRegListOperands(MCInst &Inst, unsigned N) const { 1084 assert(N == 1 && "Invalid number of operands!"); 1085 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1086 for (SmallVectorImpl<unsigned>::const_iterator 1087 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1088 Inst.addOperand(MCOperand::CreateReg(*I)); 1089 } 1090 1091 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1092 addRegListOperands(Inst, N); 1093 } 1094 1095 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1096 addRegListOperands(Inst, N); 1097 } 1098 1099 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1100 assert(N == 1 && "Invalid number of operands!"); 1101 // Encoded as val>>3. The printer handles display as 8, 16, 24. 1102 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1103 } 1104 1105 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1106 assert(N == 1 && "Invalid number of operands!"); 1107 // Munge the lsb/width into a bitfield mask. 1108 unsigned lsb = Bitfield.LSB; 1109 unsigned width = Bitfield.Width; 1110 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 
1111 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1112 (32 - (lsb + width))); 1113 Inst.addOperand(MCOperand::CreateImm(Mask)); 1114 } 1115 1116 void addImmOperands(MCInst &Inst, unsigned N) const { 1117 assert(N == 1 && "Invalid number of operands!"); 1118 addExpr(Inst, getImm()); 1119 } 1120 1121 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1122 assert(N == 1 && "Invalid number of operands!"); 1123 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1124 } 1125 1126 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1127 assert(N == 1 && "Invalid number of operands!"); 1128 // FIXME: We really want to scale the value here, but the LDRD/STRD 1129 // instruction don't encode operands that way yet. 1130 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1131 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1132 } 1133 1134 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1135 assert(N == 1 && "Invalid number of operands!"); 1136 // The immediate is scaled by four in the encoding and is stored 1137 // in the MCInst as such. Lop off the low two bits here. 1138 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1139 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1140 } 1141 1142 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1143 assert(N == 1 && "Invalid number of operands!"); 1144 // The immediate is scaled by four in the encoding and is stored 1145 // in the MCInst as such. Lop off the low two bits here. 
1146 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1147 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1148 } 1149 1150 void addImm0_255Operands(MCInst &Inst, unsigned N) const { 1151 assert(N == 1 && "Invalid number of operands!"); 1152 addExpr(Inst, getImm()); 1153 } 1154 1155 void addImm0_7Operands(MCInst &Inst, unsigned N) const { 1156 assert(N == 1 && "Invalid number of operands!"); 1157 addExpr(Inst, getImm()); 1158 } 1159 1160 void addImm0_15Operands(MCInst &Inst, unsigned N) const { 1161 assert(N == 1 && "Invalid number of operands!"); 1162 addExpr(Inst, getImm()); 1163 } 1164 1165 void addImm0_31Operands(MCInst &Inst, unsigned N) const { 1166 assert(N == 1 && "Invalid number of operands!"); 1167 addExpr(Inst, getImm()); 1168 } 1169 1170 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1171 assert(N == 1 && "Invalid number of operands!"); 1172 // The constant encodes as the immediate-1, and we store in the instruction 1173 // the bits as encoded, so subtract off one here. 1174 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1175 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1176 } 1177 1178 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1179 assert(N == 1 && "Invalid number of operands!"); 1180 // The constant encodes as the immediate-1, and we store in the instruction 1181 // the bits as encoded, so subtract off one here. 
1182 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1183 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1184 } 1185 1186 void addImm0_65535Operands(MCInst &Inst, unsigned N) const { 1187 assert(N == 1 && "Invalid number of operands!"); 1188 addExpr(Inst, getImm()); 1189 } 1190 1191 void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const { 1192 assert(N == 1 && "Invalid number of operands!"); 1193 addExpr(Inst, getImm()); 1194 } 1195 1196 void addImm24bitOperands(MCInst &Inst, unsigned N) const { 1197 assert(N == 1 && "Invalid number of operands!"); 1198 addExpr(Inst, getImm()); 1199 } 1200 1201 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1202 assert(N == 1 && "Invalid number of operands!"); 1203 // The constant encodes as the immediate, except for 32, which encodes as 1204 // zero. 1205 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1206 unsigned Imm = CE->getValue(); 1207 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1208 } 1209 1210 void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const { 1211 assert(N == 1 && "Invalid number of operands!"); 1212 addExpr(Inst, getImm()); 1213 } 1214 1215 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1216 assert(N == 1 && "Invalid number of operands!"); 1217 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1218 // the instruction as well. 1219 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1220 int Val = CE->getValue(); 1221 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 
0 : Val)); 1222 } 1223 1224 void addARMSOImmOperands(MCInst &Inst, unsigned N) const { 1225 assert(N == 1 && "Invalid number of operands!"); 1226 addExpr(Inst, getImm()); 1227 } 1228 1229 void addT2SOImmOperands(MCInst &Inst, unsigned N) const { 1230 assert(N == 1 && "Invalid number of operands!"); 1231 addExpr(Inst, getImm()); 1232 } 1233 1234 void addSetEndImmOperands(MCInst &Inst, unsigned N) const { 1235 assert(N == 1 && "Invalid number of operands!"); 1236 addExpr(Inst, getImm()); 1237 } 1238 1239 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1240 assert(N == 1 && "Invalid number of operands!"); 1241 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1242 } 1243 1244 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1245 assert(N == 1 && "Invalid number of operands!"); 1246 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1247 } 1248 1249 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1250 assert(N == 2 && "Invalid number of operands!"); 1251 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1252 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1253 } 1254 1255 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1256 assert(N == 3 && "Invalid number of operands!"); 1257 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1258 if (!Memory.OffsetRegNum) { 1259 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1260 // Special case for #-0 1261 if (Val == INT32_MIN) Val = 0; 1262 if (Val < 0) Val = -Val; 1263 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1264 } else { 1265 // For register offset, we encode the shift type and negation flag 1266 // here. 1267 Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1268 Memory.ShiftImm, Memory.ShiftType); 1269 } 1270 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1271 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1272 Inst.addOperand(MCOperand::CreateImm(Val)); 1273 } 1274 1275 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1276 assert(N == 2 && "Invalid number of operands!"); 1277 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1278 assert(CE && "non-constant AM2OffsetImm operand!"); 1279 int32_t Val = CE->getValue(); 1280 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1281 // Special case for #-0 1282 if (Val == INT32_MIN) Val = 0; 1283 if (Val < 0) Val = -Val; 1284 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1285 Inst.addOperand(MCOperand::CreateReg(0)); 1286 Inst.addOperand(MCOperand::CreateImm(Val)); 1287 } 1288 1289 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1290 assert(N == 3 && "Invalid number of operands!"); 1291 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1292 if (!Memory.OffsetRegNum) { 1293 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1294 // Special case for #-0 1295 if (Val == INT32_MIN) Val = 0; 1296 if (Val < 0) Val = -Val; 1297 Val = ARM_AM::getAM3Opc(AddSub, Val); 1298 } else { 1299 // For register offset, we encode the shift type and negation flag 1300 // here. 1301 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1302 } 1303 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1304 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1305 Inst.addOperand(MCOperand::CreateImm(Val)); 1306 } 1307 1308 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1309 assert(N == 2 && "Invalid number of operands!"); 1310 if (Kind == k_PostIndexRegister) { 1311 int32_t Val = 1312 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? 
ARM_AM::add : ARM_AM::sub, 0); 1313 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1314 Inst.addOperand(MCOperand::CreateImm(Val)); 1315 return; 1316 } 1317 1318 // Constant offset. 1319 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1320 int32_t Val = CE->getValue(); 1321 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1322 // Special case for #-0 1323 if (Val == INT32_MIN) Val = 0; 1324 if (Val < 0) Val = -Val; 1325 Val = ARM_AM::getAM3Opc(AddSub, Val); 1326 Inst.addOperand(MCOperand::CreateReg(0)); 1327 Inst.addOperand(MCOperand::CreateImm(Val)); 1328 } 1329 1330 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1331 assert(N == 2 && "Invalid number of operands!"); 1332 // The lower two bits are always zero and as such are not encoded. 1333 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1334 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1335 // Special case for #-0 1336 if (Val == INT32_MIN) Val = 0; 1337 if (Val < 0) Val = -Val; 1338 Val = ARM_AM::getAM5Opc(AddSub, Val); 1339 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1340 Inst.addOperand(MCOperand::CreateImm(Val)); 1341 } 1342 1343 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1344 assert(N == 2 && "Invalid number of operands!"); 1345 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1346 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1347 Inst.addOperand(MCOperand::CreateImm(Val)); 1348 } 1349 1350 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1351 assert(N == 2 && "Invalid number of operands!"); 1352 // The lower two bits are always zero and as such are not encoded. 1353 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1354 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1355 Inst.addOperand(MCOperand::CreateImm(Val)); 1356 } 1357 1358 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1359 assert(N == 2 && "Invalid number of operands!"); 1360 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1361 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1362 Inst.addOperand(MCOperand::CreateImm(Val)); 1363 } 1364 1365 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1366 addMemImm8OffsetOperands(Inst, N); 1367 } 1368 1369 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1370 addMemImm8OffsetOperands(Inst, N); 1371 } 1372 1373 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1374 assert(N == 2 && "Invalid number of operands!"); 1375 // If this is an immediate, it's a label reference. 1376 if (Kind == k_Immediate) { 1377 addExpr(Inst, getImm()); 1378 Inst.addOperand(MCOperand::CreateImm(0)); 1379 return; 1380 } 1381 1382 // Otherwise, it's a normal memory reg+offset. 1383 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1384 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1385 Inst.addOperand(MCOperand::CreateImm(Val)); 1386 } 1387 1388 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1389 assert(N == 2 && "Invalid number of operands!"); 1390 // If this is an immediate, it's a label reference. 1391 if (Kind == k_Immediate) { 1392 addExpr(Inst, getImm()); 1393 Inst.addOperand(MCOperand::CreateImm(0)); 1394 return; 1395 } 1396 1397 // Otherwise, it's a normal memory reg+offset. 1398 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1399 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1400 Inst.addOperand(MCOperand::CreateImm(Val)); 1401 } 1402 1403 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1404 assert(N == 2 && "Invalid number of operands!"); 1405 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1406 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1407 } 1408 1409 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1410 assert(N == 2 && "Invalid number of operands!"); 1411 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1412 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1413 } 1414 1415 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1416 assert(N == 3 && "Invalid number of operands!"); 1417 unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1418 Memory.ShiftImm, Memory.ShiftType); 1419 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1420 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1421 Inst.addOperand(MCOperand::CreateImm(Val)); 1422 } 1423 1424 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1425 assert(N == 3 && "Invalid number of operands!"); 1426 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1427 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1428 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1429 } 1430 1431 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1432 assert(N == 2 && "Invalid number of operands!"); 1433 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1434 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1435 } 1436 1437 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1438 assert(N == 2 && "Invalid number of operands!"); 1439 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1440 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1441 Inst.addOperand(MCOperand::CreateImm(Val)); 1442 } 1443 1444 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1445 assert(N == 2 && "Invalid number of operands!"); 1446 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1447 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1448 Inst.addOperand(MCOperand::CreateImm(Val)); 1449 } 1450 1451 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1452 assert(N == 2 && "Invalid number of operands!"); 1453 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1454 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1455 Inst.addOperand(MCOperand::CreateImm(Val)); 1456 } 1457 1458 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1459 assert(N == 2 && "Invalid number of operands!"); 1460 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1461 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1462 Inst.addOperand(MCOperand::CreateImm(Val)); 1463 } 1464 1465 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1466 assert(N == 1 && "Invalid number of operands!"); 1467 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1468 assert(CE && "non-constant post-idx-imm8 operand!"); 1469 int Imm = CE->getValue(); 1470 bool isAdd = Imm >= 0; 1471 if (Imm == INT32_MIN) Imm = 0; 1472 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1473 Inst.addOperand(MCOperand::CreateImm(Imm)); 1474 } 1475 1476 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1477 assert(N == 1 && "Invalid number of operands!"); 1478 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1479 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1480 int Imm = CE->getValue(); 1481 bool isAdd = Imm >= 0; 1482 if (Imm == INT32_MIN) Imm = 0; 1483 // Immediate is scaled by 4. 1484 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1485 Inst.addOperand(MCOperand::CreateImm(Imm)); 1486 } 1487 1488 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1489 assert(N == 2 && "Invalid number of operands!"); 1490 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1491 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1492 } 1493 1494 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1495 assert(N == 2 && "Invalid number of operands!"); 1496 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1497 // The sign, shift type, and shift amount are encoded in a single operand 1498 // using the AM2 encoding helpers. 1499 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1500 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1501 PostIdxReg.ShiftTy); 1502 Inst.addOperand(MCOperand::CreateImm(Imm)); 1503 } 1504 1505 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1506 assert(N == 1 && "Invalid number of operands!"); 1507 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1508 } 1509 1510 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1511 assert(N == 1 && "Invalid number of operands!"); 1512 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1513 } 1514 1515 void addVecListOneDOperands(MCInst &Inst, unsigned N) const { 1516 assert(N == 1 && "Invalid number of operands!"); 1517 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1518 } 1519 1520 void addVecListTwoDOperands(MCInst &Inst, unsigned N) const { 1521 assert(N == 1 && "Invalid number of operands!"); 1522 // Only the first register actually goes on the instruction. The rest 1523 // are implied by the opcode. 1524 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1525 } 1526 1527 void addVecListThreeDOperands(MCInst &Inst, unsigned N) const { 1528 assert(N == 1 && "Invalid number of operands!"); 1529 // Only the first register actually goes on the instruction. 
The rest 1530 // are implied by the opcode. 1531 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1532 } 1533 1534 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1535 assert(N == 1 && "Invalid number of operands!"); 1536 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1537 } 1538 1539 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1540 assert(N == 1 && "Invalid number of operands!"); 1541 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1542 } 1543 1544 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1545 assert(N == 1 && "Invalid number of operands!"); 1546 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1547 } 1548 1549 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1550 assert(N == 1 && "Invalid number of operands!"); 1551 // The immediate encodes the type of constant as well as the value. 1552 // Mask in that this is an i8 splat. 1553 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1554 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1555 } 1556 1557 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1558 assert(N == 1 && "Invalid number of operands!"); 1559 // The immediate encodes the type of constant as well as the value. 1560 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1561 unsigned Value = CE->getValue(); 1562 if (Value >= 256) 1563 Value = (Value >> 8) | 0xa00; 1564 else 1565 Value |= 0x800; 1566 Inst.addOperand(MCOperand::CreateImm(Value)); 1567 } 1568 1569 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1570 assert(N == 1 && "Invalid number of operands!"); 1571 // The immediate encodes the type of constant as well as the value. 
1572 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1573 unsigned Value = CE->getValue(); 1574 if (Value >= 256 && Value <= 0xff00) 1575 Value = (Value >> 8) | 0x200; 1576 else if (Value > 0xffff && Value <= 0xff0000) 1577 Value = (Value >> 16) | 0x400; 1578 else if (Value > 0xffffff) 1579 Value = (Value >> 24) | 0x600; 1580 Inst.addOperand(MCOperand::CreateImm(Value)); 1581 } 1582 1583 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1584 assert(N == 1 && "Invalid number of operands!"); 1585 // The immediate encodes the type of constant as well as the value. 1586 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1587 unsigned Value = CE->getValue(); 1588 if (Value >= 256 && Value <= 0xffff) 1589 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 1590 else if (Value > 0xffff && Value <= 0xffffff) 1591 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1592 else if (Value > 0xffffff) 1593 Value = (Value >> 24) | 0x600; 1594 Inst.addOperand(MCOperand::CreateImm(Value)); 1595 } 1596 1597 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 1598 assert(N == 1 && "Invalid number of operands!"); 1599 // The immediate encodes the type of constant as well as the value. 
1600 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1601 uint64_t Value = CE->getValue(); 1602 unsigned Imm = 0; 1603 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 1604 Imm |= (Value & 1) << i; 1605 } 1606 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 1607 } 1608 1609 virtual void print(raw_ostream &OS) const; 1610 1611 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1612 ARMOperand *Op = new ARMOperand(k_ITCondMask); 1613 Op->ITMask.Mask = Mask; 1614 Op->StartLoc = S; 1615 Op->EndLoc = S; 1616 return Op; 1617 } 1618 1619 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1620 ARMOperand *Op = new ARMOperand(k_CondCode); 1621 Op->CC.Val = CC; 1622 Op->StartLoc = S; 1623 Op->EndLoc = S; 1624 return Op; 1625 } 1626 1627 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1628 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1629 Op->Cop.Val = CopVal; 1630 Op->StartLoc = S; 1631 Op->EndLoc = S; 1632 return Op; 1633 } 1634 1635 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1636 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1637 Op->Cop.Val = CopVal; 1638 Op->StartLoc = S; 1639 Op->EndLoc = S; 1640 return Op; 1641 } 1642 1643 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1644 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1645 Op->Cop.Val = Val; 1646 Op->StartLoc = S; 1647 Op->EndLoc = E; 1648 return Op; 1649 } 1650 1651 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 1652 ARMOperand *Op = new ARMOperand(k_CCOut); 1653 Op->Reg.RegNum = RegNum; 1654 Op->StartLoc = S; 1655 Op->EndLoc = S; 1656 return Op; 1657 } 1658 1659 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 1660 ARMOperand *Op = new ARMOperand(k_Token); 1661 Op->Tok.Data = Str.data(); 1662 Op->Tok.Length = Str.size(); 1663 Op->StartLoc = S; 1664 Op->EndLoc = S; 1665 return Op; 1666 } 1667 1668 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 1669 ARMOperand *Op = new 
  // NOTE(review): this chunk opens mid-way through CreateReg(); the lines
  // below complete that factory (the "new" expression is split across the
  // chunk boundary).
  ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an operand for a register shifted by another register,
  /// e.g. "r0, lsl r1".
  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an operand for a register shifted by an immediate,
  /// e.g. "r0, lsl #4".
  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a shifter-immediate operand ("lsl #n" / "asr #n", SSAT/USAT
  /// style).
  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a rotate-immediate operand (SXTB/UXTB "ror #n" family).
  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a bitfield descriptor operand (BFC/BFI lsb + width pair).
  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register-list operand.  The concrete kind (core, DPR or SPR
  /// list) is chosen from the register class of the first register, and the
  /// list is sorted into ascending order.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    // Keep the stored list in ascending order regardless of source order.
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  /// Create a NEON vector-list operand: Count consecutive registers
  /// starting at RegNum.
  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector lane-index operand ("[n]").
  /// NOTE(review): the Ctx parameter is unused here.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a generic immediate operand from an MCExpr.
  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an encoded VFP floating-point immediate operand.  The operand
  /// spans a single location (StartLoc == EndLoc).
  /// NOTE(review): the Ctx parameter is unused here.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a memory operand.  Offset fields that are not present in the
  /// parsed addressing mode are passed as zero/null by the callers.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a post-indexed register operand (e.g. the "r1" in
  /// "ldr r0, [r2], r1").
  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a DMB/DSB memory-barrier-option operand.
  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a CPS interrupt-flags (a/i/f) operand.
  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create an MSR mask operand (spec-reg selector plus flag bits).
  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.
1849 1850void ARMOperand::print(raw_ostream &OS) const { 1851 switch (Kind) { 1852 case k_FPImmediate: 1853 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1854 << ") >"; 1855 break; 1856 case k_CondCode: 1857 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1858 break; 1859 case k_CCOut: 1860 OS << "<ccout " << getReg() << ">"; 1861 break; 1862 case k_ITCondMask: { 1863 static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)", 1864 "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)", 1865 "(tee)", "(eee)" }; 1866 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1867 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1868 break; 1869 } 1870 case k_CoprocNum: 1871 OS << "<coprocessor number: " << getCoproc() << ">"; 1872 break; 1873 case k_CoprocReg: 1874 OS << "<coprocessor register: " << getCoproc() << ">"; 1875 break; 1876 case k_CoprocOption: 1877 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1878 break; 1879 case k_MSRMask: 1880 OS << "<mask: " << getMSRMask() << ">"; 1881 break; 1882 case k_Immediate: 1883 getImm()->print(OS); 1884 break; 1885 case k_MemBarrierOpt: 1886 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1887 break; 1888 case k_Memory: 1889 OS << "<memory " 1890 << " base:" << Memory.BaseRegNum; 1891 OS << ">"; 1892 break; 1893 case k_PostIndexRegister: 1894 OS << "post-idx register " << (PostIdxReg.isAdd ? 
"" : "-") 1895 << PostIdxReg.RegNum; 1896 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1897 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1898 << PostIdxReg.ShiftImm; 1899 OS << ">"; 1900 break; 1901 case k_ProcIFlags: { 1902 OS << "<ARM_PROC::"; 1903 unsigned IFlags = getProcIFlags(); 1904 for (int i=2; i >= 0; --i) 1905 if (IFlags & (1 << i)) 1906 OS << ARM_PROC::IFlagsToString(1 << i); 1907 OS << ">"; 1908 break; 1909 } 1910 case k_Register: 1911 OS << "<register " << getReg() << ">"; 1912 break; 1913 case k_ShifterImmediate: 1914 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 1915 << " #" << ShifterImm.Imm << ">"; 1916 break; 1917 case k_ShiftedRegister: 1918 OS << "<so_reg_reg " 1919 << RegShiftedReg.SrcReg 1920 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1921 << ", " << RegShiftedReg.ShiftReg << ", " 1922 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1923 << ">"; 1924 break; 1925 case k_ShiftedImmediate: 1926 OS << "<so_reg_imm " 1927 << RegShiftedImm.SrcReg 1928 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1929 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1930 << ">"; 1931 break; 1932 case k_RotateImmediate: 1933 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1934 break; 1935 case k_BitfieldDescriptor: 1936 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1937 << ", width: " << Bitfield.Width << ">"; 1938 break; 1939 case k_RegisterList: 1940 case k_DPRRegisterList: 1941 case k_SPRRegisterList: { 1942 OS << "<register_list "; 1943 1944 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1945 for (SmallVectorImpl<unsigned>::const_iterator 1946 I = RegList.begin(), E = RegList.end(); I != E; ) { 1947 OS << *I; 1948 if (++I < E) OS << ", "; 1949 } 1950 1951 OS << ">"; 1952 break; 1953 } 1954 case k_VectorList: 1955 OS << "<vector_list " << VectorList.Count << " * " 1956 << VectorList.RegNum << ">"; 1957 break; 1958 case k_Token: 1959 OS << "'" << getToken() << "'"; 1960 
break; 1961 case k_VectorIndex: 1962 OS << "<vectorindex " << getVectorIndex() << ">"; 1963 break; 1964 } 1965} 1966 1967/// @name Auto-generated Match Functions 1968/// { 1969 1970static unsigned MatchRegisterName(StringRef Name); 1971 1972/// } 1973 1974bool ARMAsmParser::ParseRegister(unsigned &RegNo, 1975 SMLoc &StartLoc, SMLoc &EndLoc) { 1976 RegNo = tryParseRegister(); 1977 1978 return (RegNo == (unsigned)-1); 1979} 1980 1981/// Try to parse a register name. The token must be an Identifier when called, 1982/// and if it is a register name the token is eaten and the register number is 1983/// returned. Otherwise return -1. 1984/// 1985int ARMAsmParser::tryParseRegister() { 1986 const AsmToken &Tok = Parser.getTok(); 1987 if (Tok.isNot(AsmToken::Identifier)) return -1; 1988 1989 // FIXME: Validate register for the current architecture; we have to do 1990 // validation later, so maybe there is no need for this here. 1991 std::string upperCase = Tok.getString().str(); 1992 std::string lowerCase = LowercaseString(upperCase); 1993 unsigned RegNum = MatchRegisterName(lowerCase); 1994 if (!RegNum) { 1995 RegNum = StringSwitch<unsigned>(lowerCase) 1996 .Case("r13", ARM::SP) 1997 .Case("r14", ARM::LR) 1998 .Case("r15", ARM::PC) 1999 .Case("ip", ARM::R12) 2000 .Default(0); 2001 } 2002 if (!RegNum) return -1; 2003 2004 Parser.Lex(); // Eat identifier token. 2005 2006 return RegNum; 2007} 2008 2009// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2010// If a recoverable error occurs, return 1. If an irrecoverable error 2011// occurs, return -1. An irrecoverable error is one where tokens have been 2012// consumed in the process of trying to parse the shifter (i.e., when it is 2013// indeed a shifter operand, but malformed). 
// Fold a shift operator together with the most recently parsed register
// operand into a single shifted-register (or shifted-immediate) operand.
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this yields 1 ("recoverable")
    // even though the shift-operator token has already been consumed --
    // that contradicts the contract documented above; confirm intent.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Build the folded operand.  RRX is emitted as a shifted *immediate*
  // (amount 0) even though ShiftReg was set above.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name.  The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2107bool ARMAsmParser:: 2108tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2109 SMLoc S = Parser.getTok().getLoc(); 2110 int RegNo = tryParseRegister(); 2111 if (RegNo == -1) 2112 return true; 2113 2114 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2115 2116 const AsmToken &ExclaimTok = Parser.getTok(); 2117 if (ExclaimTok.is(AsmToken::Exclaim)) { 2118 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2119 ExclaimTok.getLoc())); 2120 Parser.Lex(); // Eat exclaim token 2121 return false; 2122 } 2123 2124 // Also check for an index operand. This is only legal for vector registers, 2125 // but that'll get caught OK in operand matching, so we don't need to 2126 // explicitly filter everything else out here. 2127 if (Parser.getTok().is(AsmToken::LBrac)) { 2128 SMLoc SIdx = Parser.getTok().getLoc(); 2129 Parser.Lex(); // Eat left bracket token. 2130 2131 const MCExpr *ImmVal; 2132 if (getParser().ParseExpression(ImmVal)) 2133 return MatchOperand_ParseFail; 2134 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2135 if (!MCE) { 2136 TokError("immediate value expected for vector index"); 2137 return MatchOperand_ParseFail; 2138 } 2139 2140 SMLoc E = Parser.getTok().getLoc(); 2141 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2142 Error(E, "']' expected"); 2143 return MatchOperand_ParseFail; 2144 } 2145 2146 Parser.Lex(); // Eat right bracket token. 2147 2148 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2149 SIdx, E, 2150 getContext())); 2151 } 2152 2153 return false; 2154} 2155 2156/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2157/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2158/// "c5", ... 2159static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2160 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2161 // but efficient. 
2162 switch (Name.size()) { 2163 default: break; 2164 case 2: 2165 if (Name[0] != CoprocOp) 2166 return -1; 2167 switch (Name[1]) { 2168 default: return -1; 2169 case '0': return 0; 2170 case '1': return 1; 2171 case '2': return 2; 2172 case '3': return 3; 2173 case '4': return 4; 2174 case '5': return 5; 2175 case '6': return 6; 2176 case '7': return 7; 2177 case '8': return 8; 2178 case '9': return 9; 2179 } 2180 break; 2181 case 3: 2182 if (Name[0] != CoprocOp || Name[1] != '1') 2183 return -1; 2184 switch (Name[2]) { 2185 default: return -1; 2186 case '0': return 10; 2187 case '1': return 11; 2188 case '2': return 12; 2189 case '3': return 13; 2190 case '4': return 14; 2191 case '5': return 15; 2192 } 2193 break; 2194 } 2195 2196 return -1; 2197} 2198 2199/// parseITCondCode - Try to parse a condition code for an IT instruction. 2200ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2201parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2202 SMLoc S = Parser.getTok().getLoc(); 2203 const AsmToken &Tok = Parser.getTok(); 2204 if (!Tok.is(AsmToken::Identifier)) 2205 return MatchOperand_NoMatch; 2206 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2207 .Case("eq", ARMCC::EQ) 2208 .Case("ne", ARMCC::NE) 2209 .Case("hs", ARMCC::HS) 2210 .Case("cs", ARMCC::HS) 2211 .Case("lo", ARMCC::LO) 2212 .Case("cc", ARMCC::LO) 2213 .Case("mi", ARMCC::MI) 2214 .Case("pl", ARMCC::PL) 2215 .Case("vs", ARMCC::VS) 2216 .Case("vc", ARMCC::VC) 2217 .Case("hi", ARMCC::HI) 2218 .Case("ls", ARMCC::LS) 2219 .Case("ge", ARMCC::GE) 2220 .Case("lt", ARMCC::LT) 2221 .Case("gt", ARMCC::GT) 2222 .Case("le", ARMCC::LE) 2223 .Case("al", ARMCC::AL) 2224 .Default(~0U); 2225 if (CC == ~0U) 2226 return MatchOperand_NoMatch; 2227 Parser.Lex(); // Eat the token. 2228 2229 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2230 2231 return MatchOperand_Success; 2232} 2233 2234/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2235/// token must be an Identifier when called, and if it is a coprocessor 2236/// number, the token is eaten and the operand is added to the operand list. 2237ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2238parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2239 SMLoc S = Parser.getTok().getLoc(); 2240 const AsmToken &Tok = Parser.getTok(); 2241 if (Tok.isNot(AsmToken::Identifier)) 2242 return MatchOperand_NoMatch; 2243 2244 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2245 if (Num == -1) 2246 return MatchOperand_NoMatch; 2247 2248 Parser.Lex(); // Eat identifier token. 2249 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2250 return MatchOperand_Success; 2251} 2252 2253/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2254/// token must be an Identifier when called, and if it is a coprocessor 2255/// number, the token is eaten and the operand is added to the operand list. 2256ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2257parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2258 SMLoc S = Parser.getTok().getLoc(); 2259 const AsmToken &Tok = Parser.getTok(); 2260 if (Tok.isNot(AsmToken::Identifier)) 2261 return MatchOperand_NoMatch; 2262 2263 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2264 if (Reg == -1) 2265 return MatchOperand_NoMatch; 2266 2267 Parser.Lex(); // Eat identifier token. 2268 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2269 return MatchOperand_Success; 2270} 2271 2272/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2273/// coproc_option : '{' imm0_255 '}' 2274ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2275parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2276 SMLoc S = Parser.getTok().getLoc(); 2277 2278 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2279 if (Parser.getTok().isNot(AsmToken::LCurly)) 2280 return MatchOperand_NoMatch; 2281 Parser.Lex(); // Eat the '{' 2282 2283 const MCExpr *Expr; 2284 SMLoc Loc = Parser.getTok().getLoc(); 2285 if (getParser().ParseExpression(Expr)) { 2286 Error(Loc, "illegal expression"); 2287 return MatchOperand_ParseFail; 2288 } 2289 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2290 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2291 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2292 return MatchOperand_ParseFail; 2293 } 2294 int Val = CE->getValue(); 2295 2296 // Check for and consume the closing '}' 2297 if (Parser.getTok().isNot(AsmToken::RCurly)) 2298 return MatchOperand_ParseFail; 2299 SMLoc E = Parser.getTok().getLoc(); 2300 Parser.Lex(); // Eat the '}' 2301 2302 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2303 return MatchOperand_Success; 2304} 2305 2306// For register list parsing, we need to map from raw GPR register numbering 2307// to the enumeration values. The enumeration values aren't sorted by 2308// register number due to our using "sp", "lr" and "pc" as canonical names. 2309static unsigned getNextRegister(unsigned Reg) { 2310 // If this is a GPR, we need to do it manually, otherwise we can rely 2311 // on the sort ordering of the enumeration since the other reg-classes 2312 // are sane. 
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  // GPR order: r0..r12, sp, lr, pc (wrapping back to r0).
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

/// Parse a register list (e.g. "{r0, r4-r7, lr}").  Returns true on error.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
  // Store the first register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
2401 // It's OK to use the enumeration values directly here rather, as the 2402 // VFP register classes have the enum sorted properly. 2403 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2404 Reg != OldReg + 1) 2405 return Error(RegLoc, "non-contiguous register range"); 2406 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2407 } 2408 2409 SMLoc E = Parser.getTok().getLoc(); 2410 if (Parser.getTok().isNot(AsmToken::RCurly)) 2411 return Error(E, "'}' expected"); 2412 Parser.Lex(); // Eat '}' token. 2413 2414 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2415 return false; 2416} 2417 2418// parse a vector register list 2419ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2420parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2421 if(Parser.getTok().isNot(AsmToken::LCurly)) 2422 return MatchOperand_NoMatch; 2423 2424 SMLoc S = Parser.getTok().getLoc(); 2425 Parser.Lex(); // Eat '{' token. 2426 SMLoc RegLoc = Parser.getTok().getLoc(); 2427 2428 int Reg = tryParseRegister(); 2429 if (Reg == -1) { 2430 Error(RegLoc, "register expected"); 2431 return MatchOperand_ParseFail; 2432 } 2433 2434 unsigned FirstReg = Reg; 2435 unsigned Count = 1; 2436 while (Parser.getTok().is(AsmToken::Comma)) { 2437 Parser.Lex(); // Eat the comma. 2438 RegLoc = Parser.getTok().getLoc(); 2439 int OldReg = Reg; 2440 Reg = tryParseRegister(); 2441 if (Reg == -1) { 2442 Error(RegLoc, "register expected"); 2443 return MatchOperand_ParseFail; 2444 } 2445 // vector register lists must also be contiguous. 2446 // It's OK to use the enumeration values directly here rather, as the 2447 // VFP register classes have the enum sorted properly. 
2448 if (Reg != OldReg + 1) { 2449 Error(RegLoc, "non-contiguous register range"); 2450 return MatchOperand_ParseFail; 2451 } 2452 2453 ++Count; 2454 } 2455 2456 SMLoc E = Parser.getTok().getLoc(); 2457 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2458 Error(E, "'}' expected"); 2459 return MatchOperand_ParseFail; 2460 } 2461 Parser.Lex(); // Eat '}' token. 2462 2463 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2464 return MatchOperand_Success; 2465} 2466 2467/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2468ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2469parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2470 SMLoc S = Parser.getTok().getLoc(); 2471 const AsmToken &Tok = Parser.getTok(); 2472 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2473 StringRef OptStr = Tok.getString(); 2474 2475 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2476 .Case("sy", ARM_MB::SY) 2477 .Case("st", ARM_MB::ST) 2478 .Case("sh", ARM_MB::ISH) 2479 .Case("ish", ARM_MB::ISH) 2480 .Case("shst", ARM_MB::ISHST) 2481 .Case("ishst", ARM_MB::ISHST) 2482 .Case("nsh", ARM_MB::NSH) 2483 .Case("un", ARM_MB::NSH) 2484 .Case("nshst", ARM_MB::NSHST) 2485 .Case("unst", ARM_MB::NSHST) 2486 .Case("osh", ARM_MB::OSH) 2487 .Case("oshst", ARM_MB::OSHST) 2488 .Default(~0U); 2489 2490 if (Opt == ~0U) 2491 return MatchOperand_NoMatch; 2492 2493 Parser.Lex(); // Eat identifier token. 2494 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2495 return MatchOperand_Success; 2496} 2497 2498/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef IFlagsStr = Tok.getString();

  // An iflags string of "none" is interpreted to mean that none of the AIF
  // bits are set.  Not a terribly useful instruction, but a valid encoding.
  unsigned IFlags = 0;
  if (IFlagsStr != "none") {
    // Accept each of 'a', 'i', 'f' at most once, in any order.
    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
        .Case("a", ARM_PROC::A)
        .Case("i", ARM_PROC::I)
        .Case("f", ARM_PROC::F)
        .Default(~0U);

      // If some specific iflag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (IFlags & Flag))
        return MatchOperand_NoMatch;

      IFlags |= Flag;
    }
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
  return MatchOperand_Success;
}

/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
2532ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2533parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2534 SMLoc S = Parser.getTok().getLoc(); 2535 const AsmToken &Tok = Parser.getTok(); 2536 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2537 StringRef Mask = Tok.getString(); 2538 2539 if (isMClass()) { 2540 // See ARMv6-M 10.1.1 2541 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2542 .Case("apsr", 0) 2543 .Case("iapsr", 1) 2544 .Case("eapsr", 2) 2545 .Case("xpsr", 3) 2546 .Case("ipsr", 5) 2547 .Case("epsr", 6) 2548 .Case("iepsr", 7) 2549 .Case("msp", 8) 2550 .Case("psp", 9) 2551 .Case("primask", 16) 2552 .Case("basepri", 17) 2553 .Case("basepri_max", 18) 2554 .Case("faultmask", 19) 2555 .Case("control", 20) 2556 .Default(~0U); 2557 2558 if (FlagsVal == ~0U) 2559 return MatchOperand_NoMatch; 2560 2561 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2562 // basepri, basepri_max and faultmask only valid for V7m. 2563 return MatchOperand_NoMatch; 2564 2565 Parser.Lex(); // Eat identifier token. 
2566 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2567 return MatchOperand_Success; 2568 } 2569 2570 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2571 size_t Start = 0, Next = Mask.find('_'); 2572 StringRef Flags = ""; 2573 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2574 if (Next != StringRef::npos) 2575 Flags = Mask.slice(Next+1, Mask.size()); 2576 2577 // FlagsVal contains the complete mask: 2578 // 3-0: Mask 2579 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2580 unsigned FlagsVal = 0; 2581 2582 if (SpecReg == "apsr") { 2583 FlagsVal = StringSwitch<unsigned>(Flags) 2584 .Case("nzcvq", 0x8) // same as CPSR_f 2585 .Case("g", 0x4) // same as CPSR_s 2586 .Case("nzcvqg", 0xc) // same as CPSR_fs 2587 .Default(~0U); 2588 2589 if (FlagsVal == ~0U) { 2590 if (!Flags.empty()) 2591 return MatchOperand_NoMatch; 2592 else 2593 FlagsVal = 8; // No flag 2594 } 2595 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2596 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2597 Flags = "fc"; 2598 for (int i = 0, e = Flags.size(); i != e; ++i) { 2599 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2600 .Case("c", 1) 2601 .Case("x", 2) 2602 .Case("s", 4) 2603 .Case("f", 8) 2604 .Default(~0U); 2605 2606 // If some specific flag is already set, it means that some letter is 2607 // present more than once, this is not acceptable. 2608 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2609 return MatchOperand_NoMatch; 2610 FlagsVal |= Flag; 2611 } 2612 } else // No match for special register. 2613 return MatchOperand_NoMatch; 2614 2615 // Special register without flags is NOT equivalent to "fc" flags. 2616 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2617 // two lines would enable gas compatibility at the expense of breaking 2618 // round-tripping. 
2619 // 2620 // if (!FlagsVal) 2621 // FlagsVal = 0x9; 2622 2623 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2624 if (SpecReg == "spsr") 2625 FlagsVal |= 16; 2626 2627 Parser.Lex(); // Eat identifier token. 2628 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2629 return MatchOperand_Success; 2630} 2631 2632ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2633parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2634 int Low, int High) { 2635 const AsmToken &Tok = Parser.getTok(); 2636 if (Tok.isNot(AsmToken::Identifier)) { 2637 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2638 return MatchOperand_ParseFail; 2639 } 2640 StringRef ShiftName = Tok.getString(); 2641 std::string LowerOp = LowercaseString(Op); 2642 std::string UpperOp = UppercaseString(Op); 2643 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2644 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2645 return MatchOperand_ParseFail; 2646 } 2647 Parser.Lex(); // Eat shift type token. 2648 2649 // There must be a '#' and a shift amount. 2650 if (Parser.getTok().isNot(AsmToken::Hash)) { 2651 Error(Parser.getTok().getLoc(), "'#' expected"); 2652 return MatchOperand_ParseFail; 2653 } 2654 Parser.Lex(); // Eat hash token. 
2655 2656 const MCExpr *ShiftAmount; 2657 SMLoc Loc = Parser.getTok().getLoc(); 2658 if (getParser().ParseExpression(ShiftAmount)) { 2659 Error(Loc, "illegal expression"); 2660 return MatchOperand_ParseFail; 2661 } 2662 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2663 if (!CE) { 2664 Error(Loc, "constant expression expected"); 2665 return MatchOperand_ParseFail; 2666 } 2667 int Val = CE->getValue(); 2668 if (Val < Low || Val > High) { 2669 Error(Loc, "immediate value out of range"); 2670 return MatchOperand_ParseFail; 2671 } 2672 2673 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2674 2675 return MatchOperand_Success; 2676} 2677 2678ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2679parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2680 const AsmToken &Tok = Parser.getTok(); 2681 SMLoc S = Tok.getLoc(); 2682 if (Tok.isNot(AsmToken::Identifier)) { 2683 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2684 return MatchOperand_ParseFail; 2685 } 2686 int Val = StringSwitch<int>(Tok.getString()) 2687 .Case("be", 1) 2688 .Case("le", 0) 2689 .Default(-1); 2690 Parser.Lex(); // Eat the token. 2691 2692 if (Val == -1) { 2693 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2694 return MatchOperand_ParseFail; 2695 } 2696 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2697 getContext()), 2698 S, Parser.getTok().getLoc())); 2699 return MatchOperand_Success; 2700} 2701 2702/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2703/// instructions. Legal values are: 2704/// lsl #n 'n' in [0,31] 2705/// asr #n 'n' in [1,32] 2706/// n == 32 encoded as n == 0. 
2707ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2708parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2709 const AsmToken &Tok = Parser.getTok(); 2710 SMLoc S = Tok.getLoc(); 2711 if (Tok.isNot(AsmToken::Identifier)) { 2712 Error(S, "shift operator 'asr' or 'lsl' expected"); 2713 return MatchOperand_ParseFail; 2714 } 2715 StringRef ShiftName = Tok.getString(); 2716 bool isASR; 2717 if (ShiftName == "lsl" || ShiftName == "LSL") 2718 isASR = false; 2719 else if (ShiftName == "asr" || ShiftName == "ASR") 2720 isASR = true; 2721 else { 2722 Error(S, "shift operator 'asr' or 'lsl' expected"); 2723 return MatchOperand_ParseFail; 2724 } 2725 Parser.Lex(); // Eat the operator. 2726 2727 // A '#' and a shift amount. 2728 if (Parser.getTok().isNot(AsmToken::Hash)) { 2729 Error(Parser.getTok().getLoc(), "'#' expected"); 2730 return MatchOperand_ParseFail; 2731 } 2732 Parser.Lex(); // Eat hash token. 2733 2734 const MCExpr *ShiftAmount; 2735 SMLoc E = Parser.getTok().getLoc(); 2736 if (getParser().ParseExpression(ShiftAmount)) { 2737 Error(E, "malformed shift expression"); 2738 return MatchOperand_ParseFail; 2739 } 2740 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2741 if (!CE) { 2742 Error(E, "shift amount must be an immediate"); 2743 return MatchOperand_ParseFail; 2744 } 2745 2746 int64_t Val = CE->getValue(); 2747 if (isASR) { 2748 // Shift amount must be in [1,32] 2749 if (Val < 1 || Val > 32) { 2750 Error(E, "'asr' shift amount must be in range [1,32]"); 2751 return MatchOperand_ParseFail; 2752 } 2753 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2754 if (isThumb() && Val == 32) { 2755 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2756 return MatchOperand_ParseFail; 2757 } 2758 if (Val == 32) Val = 0; 2759 } else { 2760 // Shift amount must be in [1,32] 2761 if (Val < 0 || Val > 31) { 2762 Error(E, "'lsr' shift amount must be in range [0,31]"); 2763 return MatchOperand_ParseFail; 2764 } 2765 } 2766 2767 E = Parser.getTok().getLoc(); 2768 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2769 2770 return MatchOperand_Success; 2771} 2772 2773/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2774/// of instructions. Legal values are: 2775/// ror #n 'n' in {0, 8, 16, 24} 2776ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2777parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2778 const AsmToken &Tok = Parser.getTok(); 2779 SMLoc S = Tok.getLoc(); 2780 if (Tok.isNot(AsmToken::Identifier)) 2781 return MatchOperand_NoMatch; 2782 StringRef ShiftName = Tok.getString(); 2783 if (ShiftName != "ror" && ShiftName != "ROR") 2784 return MatchOperand_NoMatch; 2785 Parser.Lex(); // Eat the operator. 2786 2787 // A '#' and a rotate amount. 2788 if (Parser.getTok().isNot(AsmToken::Hash)) { 2789 Error(Parser.getTok().getLoc(), "'#' expected"); 2790 return MatchOperand_ParseFail; 2791 } 2792 Parser.Lex(); // Eat hash token. 2793 2794 const MCExpr *ShiftAmount; 2795 SMLoc E = Parser.getTok().getLoc(); 2796 if (getParser().ParseExpression(ShiftAmount)) { 2797 Error(E, "malformed rotate expression"); 2798 return MatchOperand_ParseFail; 2799 } 2800 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2801 if (!CE) { 2802 Error(E, "rotate amount must be an immediate"); 2803 return MatchOperand_ParseFail; 2804 } 2805 2806 int64_t Val = CE->getValue(); 2807 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2808 // normally, zero is represented in asm by omitting the rotate operand 2809 // entirely. 
2810 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2811 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2812 return MatchOperand_ParseFail; 2813 } 2814 2815 E = Parser.getTok().getLoc(); 2816 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2817 2818 return MatchOperand_Success; 2819} 2820 2821ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2822parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2823 SMLoc S = Parser.getTok().getLoc(); 2824 // The bitfield descriptor is really two operands, the LSB and the width. 2825 if (Parser.getTok().isNot(AsmToken::Hash)) { 2826 Error(Parser.getTok().getLoc(), "'#' expected"); 2827 return MatchOperand_ParseFail; 2828 } 2829 Parser.Lex(); // Eat hash token. 2830 2831 const MCExpr *LSBExpr; 2832 SMLoc E = Parser.getTok().getLoc(); 2833 if (getParser().ParseExpression(LSBExpr)) { 2834 Error(E, "malformed immediate expression"); 2835 return MatchOperand_ParseFail; 2836 } 2837 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2838 if (!CE) { 2839 Error(E, "'lsb' operand must be an immediate"); 2840 return MatchOperand_ParseFail; 2841 } 2842 2843 int64_t LSB = CE->getValue(); 2844 // The LSB must be in the range [0,31] 2845 if (LSB < 0 || LSB > 31) { 2846 Error(E, "'lsb' operand must be in the range [0,31]"); 2847 return MatchOperand_ParseFail; 2848 } 2849 E = Parser.getTok().getLoc(); 2850 2851 // Expect another immediate operand. 2852 if (Parser.getTok().isNot(AsmToken::Comma)) { 2853 Error(Parser.getTok().getLoc(), "too few operands"); 2854 return MatchOperand_ParseFail; 2855 } 2856 Parser.Lex(); // Eat hash token. 2857 if (Parser.getTok().isNot(AsmToken::Hash)) { 2858 Error(Parser.getTok().getLoc(), "'#' expected"); 2859 return MatchOperand_ParseFail; 2860 } 2861 Parser.Lex(); // Eat hash token. 
2862 2863 const MCExpr *WidthExpr; 2864 if (getParser().ParseExpression(WidthExpr)) { 2865 Error(E, "malformed immediate expression"); 2866 return MatchOperand_ParseFail; 2867 } 2868 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2869 if (!CE) { 2870 Error(E, "'width' operand must be an immediate"); 2871 return MatchOperand_ParseFail; 2872 } 2873 2874 int64_t Width = CE->getValue(); 2875 // The LSB must be in the range [1,32-lsb] 2876 if (Width < 1 || Width > 32 - LSB) { 2877 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2878 return MatchOperand_ParseFail; 2879 } 2880 E = Parser.getTok().getLoc(); 2881 2882 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2883 2884 return MatchOperand_Success; 2885} 2886 2887ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2888parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2889 // Check for a post-index addressing register operand. Specifically: 2890 // postidx_reg := '+' register {, shift} 2891 // | '-' register {, shift} 2892 // | register {, shift} 2893 2894 // This method must return MatchOperand_NoMatch without consuming any tokens 2895 // in the case where there is no match, as other alternatives take other 2896 // parse methods. 2897 AsmToken Tok = Parser.getTok(); 2898 SMLoc S = Tok.getLoc(); 2899 bool haveEaten = false; 2900 bool isAdd = true; 2901 int Reg = -1; 2902 if (Tok.is(AsmToken::Plus)) { 2903 Parser.Lex(); // Eat the '+' token. 2904 haveEaten = true; 2905 } else if (Tok.is(AsmToken::Minus)) { 2906 Parser.Lex(); // Eat the '-' token. 
2907 isAdd = false; 2908 haveEaten = true; 2909 } 2910 if (Parser.getTok().is(AsmToken::Identifier)) 2911 Reg = tryParseRegister(); 2912 if (Reg == -1) { 2913 if (!haveEaten) 2914 return MatchOperand_NoMatch; 2915 Error(Parser.getTok().getLoc(), "register expected"); 2916 return MatchOperand_ParseFail; 2917 } 2918 SMLoc E = Parser.getTok().getLoc(); 2919 2920 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2921 unsigned ShiftImm = 0; 2922 if (Parser.getTok().is(AsmToken::Comma)) { 2923 Parser.Lex(); // Eat the ','. 2924 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2925 return MatchOperand_ParseFail; 2926 } 2927 2928 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2929 ShiftImm, S, E)); 2930 2931 return MatchOperand_Success; 2932} 2933 2934ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2935parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2936 // Check for a post-index addressing register operand. Specifically: 2937 // am3offset := '+' register 2938 // | '-' register 2939 // | register 2940 // | # imm 2941 // | # + imm 2942 // | # - imm 2943 2944 // This method must return MatchOperand_NoMatch without consuming any tokens 2945 // in the case where there is no match, as other alternatives take other 2946 // parse methods. 2947 AsmToken Tok = Parser.getTok(); 2948 SMLoc S = Tok.getLoc(); 2949 2950 // Do immediates first, as we always parse those if we have a '#'. 2951 if (Parser.getTok().is(AsmToken::Hash)) { 2952 Parser.Lex(); // Eat the '#'. 2953 // Explicitly look for a '-', as we need to encode negative zero 2954 // differently. 
2955 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2956 const MCExpr *Offset; 2957 if (getParser().ParseExpression(Offset)) 2958 return MatchOperand_ParseFail; 2959 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2960 if (!CE) { 2961 Error(S, "constant expression expected"); 2962 return MatchOperand_ParseFail; 2963 } 2964 SMLoc E = Tok.getLoc(); 2965 // Negative zero is encoded as the flag value INT32_MIN. 2966 int32_t Val = CE->getValue(); 2967 if (isNegative && Val == 0) 2968 Val = INT32_MIN; 2969 2970 Operands.push_back( 2971 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2972 2973 return MatchOperand_Success; 2974 } 2975 2976 2977 bool haveEaten = false; 2978 bool isAdd = true; 2979 int Reg = -1; 2980 if (Tok.is(AsmToken::Plus)) { 2981 Parser.Lex(); // Eat the '+' token. 2982 haveEaten = true; 2983 } else if (Tok.is(AsmToken::Minus)) { 2984 Parser.Lex(); // Eat the '-' token. 2985 isAdd = false; 2986 haveEaten = true; 2987 } 2988 if (Parser.getTok().is(AsmToken::Identifier)) 2989 Reg = tryParseRegister(); 2990 if (Reg == -1) { 2991 if (!haveEaten) 2992 return MatchOperand_NoMatch; 2993 Error(Parser.getTok().getLoc(), "register expected"); 2994 return MatchOperand_ParseFail; 2995 } 2996 SMLoc E = Parser.getTok().getLoc(); 2997 2998 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 2999 0, S, E)); 3000 3001 return MatchOperand_Success; 3002} 3003 3004/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3005/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3006/// when they refer multiple MIOperands inside a single one. 3007bool ARMAsmParser:: 3008cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3009 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3010 // Rt, Rt2 3011 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3012 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3013 // Create a writeback register dummy placeholder. 
3014 Inst.addOperand(MCOperand::CreateReg(0)); 3015 // addr 3016 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3017 // pred 3018 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3019 return true; 3020} 3021 3022/// cvtT2StrdPre - Convert parsed operands to MCInst. 3023/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3024/// when they refer multiple MIOperands inside a single one. 3025bool ARMAsmParser:: 3026cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3027 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3028 // Create a writeback register dummy placeholder. 3029 Inst.addOperand(MCOperand::CreateReg(0)); 3030 // Rt, Rt2 3031 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3032 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3033 // addr 3034 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3035 // pred 3036 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3037 return true; 3038} 3039 3040/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3041/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3042/// when they refer multiple MIOperands inside a single one. 3043bool ARMAsmParser:: 3044cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3045 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3046 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3047 3048 // Create a writeback register dummy placeholder. 3049 Inst.addOperand(MCOperand::CreateImm(0)); 3050 3051 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3052 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3053 return true; 3054} 3055 3056/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3057/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3058/// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
3224bool ARMAsmParser:: 3225cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3226 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3227 // Rt, Rt2 3228 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3229 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3230 // Create a writeback register dummy placeholder. 3231 Inst.addOperand(MCOperand::CreateImm(0)); 3232 // addr 3233 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3234 // pred 3235 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3236 return true; 3237} 3238 3239/// cvtStrdPre - Convert parsed operands to MCInst. 3240/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3241/// when they refer multiple MIOperands inside a single one. 3242bool ARMAsmParser:: 3243cvtStrdPre(MCInst &Inst, unsigned Opcode, 3244 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3245 // Create a writeback register dummy placeholder. 3246 Inst.addOperand(MCOperand::CreateImm(0)); 3247 // Rt, Rt2 3248 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3249 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3250 // addr 3251 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3252 // pred 3253 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3254 return true; 3255} 3256 3257/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3258/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3259/// when they refer multiple MIOperands inside a single one. 3260bool ARMAsmParser:: 3261cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3262 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3263 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3264 // Create a writeback register dummy placeholder. 
3265 Inst.addOperand(MCOperand::CreateImm(0)); 3266 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3267 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3268 return true; 3269} 3270 3271/// cvtThumbMultiple- Convert parsed operands to MCInst. 3272/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3273/// when they refer multiple MIOperands inside a single one. 3274bool ARMAsmParser:: 3275cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3276 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3277 // The second source operand must be the same register as the destination 3278 // operand. 3279 if (Operands.size() == 6 && 3280 (((ARMOperand*)Operands[3])->getReg() != 3281 ((ARMOperand*)Operands[5])->getReg()) && 3282 (((ARMOperand*)Operands[3])->getReg() != 3283 ((ARMOperand*)Operands[4])->getReg())) { 3284 Error(Operands[3]->getStartLoc(), 3285 "destination register must match source register"); 3286 return false; 3287 } 3288 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3289 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3290 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3291 // If we have a three-operand form, use that, else the second source operand 3292 // is just the destination operand again. 3293 if (Operands.size() == 6) 3294 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3295 else 3296 Inst.addOperand(Inst.getOperand(0)); 3297 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3298 3299 return true; 3300} 3301 3302/// Parse an ARM memory expression, return false if successful else return true 3303/// or an error. The first token must be a '[' when called. 3304bool ARMAsmParser:: 3305parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3306 SMLoc S, E; 3307 assert(Parser.getTok().is(AsmToken::LBrac) && 3308 "Token is not a Left Bracket"); 3309 S = Parser.getTok().getLoc(); 3310 Parser.Lex(); // Eat left bracket token. 
3311 3312 const AsmToken &BaseRegTok = Parser.getTok(); 3313 int BaseRegNum = tryParseRegister(); 3314 if (BaseRegNum == -1) 3315 return Error(BaseRegTok.getLoc(), "register expected"); 3316 3317 // The next token must either be a comma or a closing bracket. 3318 const AsmToken &Tok = Parser.getTok(); 3319 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3320 return Error(Tok.getLoc(), "malformed memory operand"); 3321 3322 if (Tok.is(AsmToken::RBrac)) { 3323 E = Tok.getLoc(); 3324 Parser.Lex(); // Eat right bracket token. 3325 3326 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3327 0, 0, false, S, E)); 3328 3329 // If there's a pre-indexing writeback marker, '!', just add it as a token 3330 // operand. It's rather odd, but syntactically valid. 3331 if (Parser.getTok().is(AsmToken::Exclaim)) { 3332 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3333 Parser.Lex(); // Eat the '!'. 3334 } 3335 3336 return false; 3337 } 3338 3339 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3340 Parser.Lex(); // Eat the comma. 3341 3342 // If we have a ':', it's an alignment specifier. 3343 if (Parser.getTok().is(AsmToken::Colon)) { 3344 Parser.Lex(); // Eat the ':'. 3345 E = Parser.getTok().getLoc(); 3346 3347 const MCExpr *Expr; 3348 if (getParser().ParseExpression(Expr)) 3349 return true; 3350 3351 // The expression has to be a constant. Memory references with relocations 3352 // don't come through here, as they use the <label> forms of the relevant 3353 // instructions. 
3354 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3355 if (!CE) 3356 return Error (E, "constant expression expected"); 3357 3358 unsigned Align = 0; 3359 switch (CE->getValue()) { 3360 default: 3361 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3362 case 64: Align = 8; break; 3363 case 128: Align = 16; break; 3364 case 256: Align = 32; break; 3365 } 3366 3367 // Now we should have the closing ']' 3368 E = Parser.getTok().getLoc(); 3369 if (Parser.getTok().isNot(AsmToken::RBrac)) 3370 return Error(E, "']' expected"); 3371 Parser.Lex(); // Eat right bracket token. 3372 3373 // Don't worry about range checking the value here. That's handled by 3374 // the is*() predicates. 3375 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3376 ARM_AM::no_shift, 0, Align, 3377 false, S, E)); 3378 3379 // If there's a pre-indexing writeback marker, '!', just add it as a token 3380 // operand. 3381 if (Parser.getTok().is(AsmToken::Exclaim)) { 3382 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3383 Parser.Lex(); // Eat the '!'. 3384 } 3385 3386 return false; 3387 } 3388 3389 // If we have a '#', it's an immediate offset, else assume it's a register 3390 // offset. 3391 if (Parser.getTok().is(AsmToken::Hash)) { 3392 Parser.Lex(); // Eat the '#'. 3393 E = Parser.getTok().getLoc(); 3394 3395 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3396 const MCExpr *Offset; 3397 if (getParser().ParseExpression(Offset)) 3398 return true; 3399 3400 // The expression has to be a constant. Memory references with relocations 3401 // don't come through here, as they use the <label> forms of the relevant 3402 // instructions. 3403 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3404 if (!CE) 3405 return Error (E, "constant expression expected"); 3406 3407 // If the constant was #-0, represent it as INT32_MIN. 
3408 int32_t Val = CE->getValue(); 3409 if (isNegative && Val == 0) 3410 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3411 3412 // Now we should have the closing ']' 3413 E = Parser.getTok().getLoc(); 3414 if (Parser.getTok().isNot(AsmToken::RBrac)) 3415 return Error(E, "']' expected"); 3416 Parser.Lex(); // Eat right bracket token. 3417 3418 // Don't worry about range checking the value here. That's handled by 3419 // the is*() predicates. 3420 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3421 ARM_AM::no_shift, 0, 0, 3422 false, S, E)); 3423 3424 // If there's a pre-indexing writeback marker, '!', just add it as a token 3425 // operand. 3426 if (Parser.getTok().is(AsmToken::Exclaim)) { 3427 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3428 Parser.Lex(); // Eat the '!'. 3429 } 3430 3431 return false; 3432 } 3433 3434 // The register offset is optionally preceded by a '+' or '-' 3435 bool isNegative = false; 3436 if (Parser.getTok().is(AsmToken::Minus)) { 3437 isNegative = true; 3438 Parser.Lex(); // Eat the '-'. 3439 } else if (Parser.getTok().is(AsmToken::Plus)) { 3440 // Nothing to do. 3441 Parser.Lex(); // Eat the '+'. 3442 } 3443 3444 E = Parser.getTok().getLoc(); 3445 int OffsetRegNum = tryParseRegister(); 3446 if (OffsetRegNum == -1) 3447 return Error(E, "register expected"); 3448 3449 // If there's a shift operator, handle it. 3450 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3451 unsigned ShiftImm = 0; 3452 if (Parser.getTok().is(AsmToken::Comma)) { 3453 Parser.Lex(); // Eat the ','. 3454 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3455 return true; 3456 } 3457 3458 // Now we should have the closing ']' 3459 E = Parser.getTok().getLoc(); 3460 if (Parser.getTok().isNot(AsmToken::RBrac)) 3461 return Error(E, "']' expected"); 3462 Parser.Lex(); // Eat right bracket token. 
3463 3464 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3465 ShiftType, ShiftImm, 0, isNegative, 3466 S, E)); 3467 3468 // If there's a pre-indexing writeback marker, '!', just add it as a token 3469 // operand. 3470 if (Parser.getTok().is(AsmToken::Exclaim)) { 3471 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3472 Parser.Lex(); // Eat the '!'. 3473 } 3474 3475 return false; 3476} 3477 3478/// parseMemRegOffsetShift - one of these two: 3479/// ( lsl | lsr | asr | ror ) , # shift_amount 3480/// rrx 3481/// return true if it parses a shift otherwise it returns false. 3482bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3483 unsigned &Amount) { 3484 SMLoc Loc = Parser.getTok().getLoc(); 3485 const AsmToken &Tok = Parser.getTok(); 3486 if (Tok.isNot(AsmToken::Identifier)) 3487 return true; 3488 StringRef ShiftName = Tok.getString(); 3489 if (ShiftName == "lsl" || ShiftName == "LSL") 3490 St = ARM_AM::lsl; 3491 else if (ShiftName == "lsr" || ShiftName == "LSR") 3492 St = ARM_AM::lsr; 3493 else if (ShiftName == "asr" || ShiftName == "ASR") 3494 St = ARM_AM::asr; 3495 else if (ShiftName == "ror" || ShiftName == "ROR") 3496 St = ARM_AM::ror; 3497 else if (ShiftName == "rrx" || ShiftName == "RRX") 3498 St = ARM_AM::rrx; 3499 else 3500 return Error(Loc, "illegal shift operator"); 3501 Parser.Lex(); // Eat shift type token. 3502 3503 // rrx stands alone. 3504 Amount = 0; 3505 if (St != ARM_AM::rrx) { 3506 Loc = Parser.getTok().getLoc(); 3507 // A '#' and a shift amount. 3508 const AsmToken &HashTok = Parser.getTok(); 3509 if (HashTok.isNot(AsmToken::Hash)) 3510 return Error(HashTok.getLoc(), "'#' expected"); 3511 Parser.Lex(); // Eat hash token. 3512 3513 const MCExpr *Expr; 3514 if (getParser().ParseExpression(Expr)) 3515 return true; 3516 // Range check the immediate. 
3517 // lsl, ror: 0 <= imm <= 31 3518 // lsr, asr: 0 <= imm <= 32 3519 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3520 if (!CE) 3521 return Error(Loc, "shift amount must be an immediate"); 3522 int64_t Imm = CE->getValue(); 3523 if (Imm < 0 || 3524 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3525 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3526 return Error(Loc, "immediate shift value out of range"); 3527 Amount = Imm; 3528 } 3529 3530 return false; 3531} 3532 3533/// parseFPImm - A floating point immediate expression operand. 3534ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3535parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3536 SMLoc S = Parser.getTok().getLoc(); 3537 3538 if (Parser.getTok().isNot(AsmToken::Hash)) 3539 return MatchOperand_NoMatch; 3540 3541 // Disambiguate the VMOV forms that can accept an FP immediate. 3542 // vmov.f32 <sreg>, #imm 3543 // vmov.f64 <dreg>, #imm 3544 // vmov.f32 <dreg>, #imm @ vector f32x2 3545 // vmov.f32 <qreg>, #imm @ vector f32x4 3546 // 3547 // There are also the NEON VMOV instructions which expect an 3548 // integer constant. Make sure we don't try to parse an FPImm 3549 // for these: 3550 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3551 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3552 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3553 TyOp->getToken() != ".f64")) 3554 return MatchOperand_NoMatch; 3555 3556 Parser.Lex(); // Eat the '#'. 3557 3558 // Handle negation, as that still comes through as a separate token. 3559 bool isNegative = false; 3560 if (Parser.getTok().is(AsmToken::Minus)) { 3561 isNegative = true; 3562 Parser.Lex(); 3563 } 3564 const AsmToken &Tok = Parser.getTok(); 3565 if (Tok.is(AsmToken::Real)) { 3566 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3567 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3568 // If we had a '-' in front, toggle the sign bit. 
3569 IntVal ^= (uint64_t)isNegative << 63; 3570 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3571 Parser.Lex(); // Eat the token. 3572 if (Val == -1) { 3573 TokError("floating point value out of range"); 3574 return MatchOperand_ParseFail; 3575 } 3576 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3577 return MatchOperand_Success; 3578 } 3579 if (Tok.is(AsmToken::Integer)) { 3580 int64_t Val = Tok.getIntVal(); 3581 Parser.Lex(); // Eat the token. 3582 if (Val > 255 || Val < 0) { 3583 TokError("encoded floating point value out of range"); 3584 return MatchOperand_ParseFail; 3585 } 3586 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3587 return MatchOperand_Success; 3588 } 3589 3590 TokError("invalid floating point immediate"); 3591 return MatchOperand_ParseFail; 3592} 3593/// Parse a arm instruction operand. For now this parses the operand regardless 3594/// of the mnemonic. 3595bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3596 StringRef Mnemonic) { 3597 SMLoc S, E; 3598 3599 // Check if the current operand has a custom associated parser, if so, try to 3600 // custom parse the operand, or fallback to the general approach. 3601 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3602 if (ResTy == MatchOperand_Success) 3603 return false; 3604 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3605 // there was a match, but an error occurred, in which case, just return that 3606 // the operand parsing failed. 3607 if (ResTy == MatchOperand_ParseFail) 3608 return true; 3609 3610 switch (getLexer().getKind()) { 3611 default: 3612 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3613 return true; 3614 case AsmToken::Identifier: { 3615 // If this is VMRS, check for the apsr_nzcv operand. 
3616 if (!tryParseRegisterWithWriteBack(Operands)) 3617 return false; 3618 int Res = tryParseShiftRegister(Operands); 3619 if (Res == 0) // success 3620 return false; 3621 else if (Res == -1) // irrecoverable error 3622 return true; 3623 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3624 S = Parser.getTok().getLoc(); 3625 Parser.Lex(); 3626 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3627 return false; 3628 } 3629 3630 // Fall though for the Identifier case that is not a register or a 3631 // special name. 3632 } 3633 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3634 case AsmToken::Dot: { // . as a branch target 3635 // This was not a register so parse other operands that start with an 3636 // identifier (like labels) as expressions and create them as immediates. 3637 const MCExpr *IdVal; 3638 S = Parser.getTok().getLoc(); 3639 if (getParser().ParseExpression(IdVal)) 3640 return true; 3641 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3642 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3643 return false; 3644 } 3645 case AsmToken::LBrac: 3646 return parseMemory(Operands); 3647 case AsmToken::LCurly: 3648 return parseRegisterList(Operands); 3649 case AsmToken::Hash: { 3650 // #42 -> immediate. 
3651 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3652 S = Parser.getTok().getLoc(); 3653 Parser.Lex(); 3654 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3655 const MCExpr *ImmVal; 3656 if (getParser().ParseExpression(ImmVal)) 3657 return true; 3658 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3659 if (!CE) { 3660 Error(S, "constant expression expected"); 3661 return MatchOperand_ParseFail; 3662 } 3663 int32_t Val = CE->getValue(); 3664 if (isNegative && Val == 0) 3665 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3666 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3667 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3668 return false; 3669 } 3670 case AsmToken::Colon: { 3671 // ":lower16:" and ":upper16:" expression prefixes 3672 // FIXME: Check it's an expression prefix, 3673 // e.g. (FOO - :lower16:BAR) isn't legal. 3674 ARMMCExpr::VariantKind RefKind; 3675 if (parsePrefix(RefKind)) 3676 return true; 3677 3678 const MCExpr *SubExprVal; 3679 if (getParser().ParseExpression(SubExprVal)) 3680 return true; 3681 3682 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3683 getContext()); 3684 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3685 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3686 return false; 3687 } 3688 } 3689} 3690 3691// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3692// :lower16: and :upper16:. 
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  // The prefix must be closed by a second ':' before the expression proper.
  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// Returns the canonical (stripped) mnemonic and fills in the out-parameters:
/// PredicationCode (ARMCC value, ARMCC::AL if none), CarrySetting (trailing
/// 's'), ProcessorIMod (CPS "ie"/"id" interrupt-mode suffix, 0 if none) and
/// ITMask (the "t"/"e" condition-mask letters of an IT instruction).
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  // These mnemonics happen to end in two letters that look like a condition
  // code (e.g. "teq" ends in "eq"), so they must be filtered out before the
  // suffix check below.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // Check whether the last two characters spell a condition code.
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  // Everything after "it" (e.g. "tte" in "ittte") is the mask.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
3823void ARMAsmParser:: 3824getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 3825 bool &CanAcceptPredicationCode) { 3826 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 3827 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 3828 Mnemonic == "add" || Mnemonic == "adc" || 3829 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 3830 Mnemonic == "orr" || Mnemonic == "mvn" || 3831 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 3832 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 3833 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 3834 Mnemonic == "mla" || Mnemonic == "smlal" || 3835 Mnemonic == "umlal" || Mnemonic == "umull"))) { 3836 CanAcceptCarrySet = true; 3837 } else 3838 CanAcceptCarrySet = false; 3839 3840 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 3841 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 3842 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 3843 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 3844 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 3845 (Mnemonic == "clrex" && !isThumb()) || 3846 (Mnemonic == "nop" && isThumbOne()) || 3847 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 3848 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 3849 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 3850 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 3851 !isThumb()) || 3852 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 3853 CanAcceptPredicationCode = false; 3854 } else 3855 CanAcceptPredicationCode = true; 3856 3857 if (isThumb()) { 3858 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 3859 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 3860 CanAcceptPredicationCode = false; 3861 } 3862} 3863 3864bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 3865 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3866 // FIXME: This is all horribly hacky. We really need a better way to deal 3867 // with optional operands like this in the matcher table. 3868 3869 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 3870 // another does not. Specifically, the MOVW instruction does not. So we 3871 // special case it here and remove the defaulted (non-setting) cc_out 3872 // operand if that's the instruction we're trying to match. 3873 // 3874 // We do this as post-processing of the explicit operands rather than just 3875 // conditionally adding the cc_out in the first place because we need 3876 // to check the type of the parsed immediate operand. 3877 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 3878 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 3879 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 3880 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3881 return true; 3882 3883 // Register-register 'add' for thumb does not have a cc_out operand 3884 // when there are only two register operands. 3885 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 3886 static_cast<ARMOperand*>(Operands[3])->isReg() && 3887 static_cast<ARMOperand*>(Operands[4])->isReg() && 3888 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3889 return true; 3890 // Register-register 'add' for thumb does not have a cc_out operand 3891 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 3892 // have to check the immediate range here since Thumb2 has a variant 3893 // that can handle a different range and has a cc_out operand. 
3894 if (((isThumb() && Mnemonic == "add") || 3895 (isThumbTwo() && Mnemonic == "sub")) && 3896 Operands.size() == 6 && 3897 static_cast<ARMOperand*>(Operands[3])->isReg() && 3898 static_cast<ARMOperand*>(Operands[4])->isReg() && 3899 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 3900 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3901 (static_cast<ARMOperand*>(Operands[5])->isReg() || 3902 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 3903 return true; 3904 // For Thumb2, add/sub immediate does not have a cc_out operand for the 3905 // imm0_4095 variant. That's the least-preferred variant when 3906 // selecting via the generic "add" mnemonic, so to know that we 3907 // should remove the cc_out operand, we have to explicitly check that 3908 // it's not one of the other variants. Ugh. 3909 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 3910 Operands.size() == 6 && 3911 static_cast<ARMOperand*>(Operands[3])->isReg() && 3912 static_cast<ARMOperand*>(Operands[4])->isReg() && 3913 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3914 // Nest conditions rather than one big 'if' statement for readability. 3915 // 3916 // If either register is a high reg, it's either one of the SP 3917 // variants (handled above) or a 32-bit encoding, so we just 3918 // check against T3. 3919 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3920 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 3921 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 3922 return false; 3923 // If both registers are low, we're in an IT block, and the immediate is 3924 // in range, we should use encoding T1 instead, which has a cc_out. 
3925 if (inITBlock() && 3926 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 3927 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 3928 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 3929 return false; 3930 3931 // Otherwise, we use encoding T4, which does not have a cc_out 3932 // operand. 3933 return true; 3934 } 3935 3936 // The thumb2 multiply instruction doesn't have a CCOut register, so 3937 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 3938 // use the 16-bit encoding or not. 3939 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 3940 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3941 static_cast<ARMOperand*>(Operands[3])->isReg() && 3942 static_cast<ARMOperand*>(Operands[4])->isReg() && 3943 static_cast<ARMOperand*>(Operands[5])->isReg() && 3944 // If the registers aren't low regs, the destination reg isn't the 3945 // same as one of the source regs, or the cc_out operand is zero 3946 // outside of an IT block, we have to use the 32-bit encoding, so 3947 // remove the cc_out operand. 3948 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3949 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 3950 !inITBlock() || 3951 (static_cast<ARMOperand*>(Operands[3])->getReg() != 3952 static_cast<ARMOperand*>(Operands[5])->getReg() && 3953 static_cast<ARMOperand*>(Operands[3])->getReg() != 3954 static_cast<ARMOperand*>(Operands[4])->getReg()))) 3955 return true; 3956 3957 3958 3959 // Register-register 'add/sub' for thumb does not have a cc_out operand 3960 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 3961 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 3962 // right, this will result in better diagnostics (which operand is off) 3963 // anyway. 
3964 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 3965 (Operands.size() == 5 || Operands.size() == 6) && 3966 static_cast<ARMOperand*>(Operands[3])->isReg() && 3967 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 3968 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3969 return true; 3970 3971 return false; 3972} 3973 3974/// Parse an arm instruction mnemonic followed by its operands. 3975bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, 3976 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3977 // Create the leading tokens for the mnemonic, split by '.' characters. 3978 size_t Start = 0, Next = Name.find('.'); 3979 StringRef Mnemonic = Name.slice(Start, Next); 3980 3981 // Split out the predication code and carry setting flag from the mnemonic. 3982 unsigned PredicationCode; 3983 unsigned ProcessorIMod; 3984 bool CarrySetting; 3985 StringRef ITMask; 3986 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 3987 ProcessorIMod, ITMask); 3988 3989 // In Thumb1, only the branch (B) instruction can be predicated. 3990 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 3991 Parser.EatToEndOfStatement(); 3992 return Error(NameLoc, "conditional execution not supported in Thumb1"); 3993 } 3994 3995 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 3996 3997 // Handle the IT instruction ITMask. Convert it to a bitmask. This 3998 // is the mask as it will be for the IT encoding if the conditional 3999 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 4000 // where the conditional bit0 is zero, the instruction post-processing 4001 // will adjust the mask accordingly. 
4002 if (Mnemonic == "it") { 4003 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 4004 if (ITMask.size() > 3) { 4005 Parser.EatToEndOfStatement(); 4006 return Error(Loc, "too many conditions on IT instruction"); 4007 } 4008 unsigned Mask = 8; 4009 for (unsigned i = ITMask.size(); i != 0; --i) { 4010 char pos = ITMask[i - 1]; 4011 if (pos != 't' && pos != 'e') { 4012 Parser.EatToEndOfStatement(); 4013 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 4014 } 4015 Mask >>= 1; 4016 if (ITMask[i - 1] == 't') 4017 Mask |= 8; 4018 } 4019 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 4020 } 4021 4022 // FIXME: This is all a pretty gross hack. We should automatically handle 4023 // optional operands like this via tblgen. 4024 4025 // Next, add the CCOut and ConditionCode operands, if needed. 4026 // 4027 // For mnemonics which can ever incorporate a carry setting bit or predication 4028 // code, our matching model involves us always generating CCOut and 4029 // ConditionCode operands to match the mnemonic "as written" and then we let 4030 // the matcher deal with finding the right instruction or generating an 4031 // appropriate error. 4032 bool CanAcceptCarrySet, CanAcceptPredicationCode; 4033 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode); 4034 4035 // If we had a carry-set on an instruction that can't do that, issue an 4036 // error. 4037 if (!CanAcceptCarrySet && CarrySetting) { 4038 Parser.EatToEndOfStatement(); 4039 return Error(NameLoc, "instruction '" + Mnemonic + 4040 "' can not set flags, but 's' suffix specified"); 4041 } 4042 // If we had a predication code on an instruction that can't do that, issue an 4043 // error. 
4044 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 4045 Parser.EatToEndOfStatement(); 4046 return Error(NameLoc, "instruction '" + Mnemonic + 4047 "' is not predicable, but condition code specified"); 4048 } 4049 4050 // Add the carry setting operand, if necessary. 4051 if (CanAcceptCarrySet) { 4052 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 4053 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 4054 Loc)); 4055 } 4056 4057 // Add the predication code operand, if necessary. 4058 if (CanAcceptPredicationCode) { 4059 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 4060 CarrySetting); 4061 Operands.push_back(ARMOperand::CreateCondCode( 4062 ARMCC::CondCodes(PredicationCode), Loc)); 4063 } 4064 4065 // Add the processor imod operand, if necessary. 4066 if (ProcessorIMod) { 4067 Operands.push_back(ARMOperand::CreateImm( 4068 MCConstantExpr::Create(ProcessorIMod, getContext()), 4069 NameLoc, NameLoc)); 4070 } 4071 4072 // Add the remaining tokens in the mnemonic. 4073 while (Next != StringRef::npos) { 4074 Start = Next; 4075 Next = Name.find('.', Start + 1); 4076 StringRef ExtraToken = Name.slice(Start, Next); 4077 4078 // For now, we're only parsing Thumb1 (for the most part), so 4079 // just ignore ".n" qualifiers. We'll use them to restrict 4080 // matching when we do Thumb2. 4081 if (ExtraToken != ".n") { 4082 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 4083 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 4084 } 4085 } 4086 4087 // Read the remaining operands. 4088 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4089 // Read the first operand. 4090 if (parseOperand(Operands, Mnemonic)) { 4091 Parser.EatToEndOfStatement(); 4092 return true; 4093 } 4094 4095 while (getLexer().is(AsmToken::Comma)) { 4096 Parser.Lex(); // Eat the comma. 4097 4098 // Parse and remember the operand. 
4099 if (parseOperand(Operands, Mnemonic)) { 4100 Parser.EatToEndOfStatement(); 4101 return true; 4102 } 4103 } 4104 } 4105 4106 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4107 SMLoc Loc = getLexer().getLoc(); 4108 Parser.EatToEndOfStatement(); 4109 return Error(Loc, "unexpected token in argument list"); 4110 } 4111 4112 Parser.Lex(); // Consume the EndOfStatement 4113 4114 // Some instructions, mostly Thumb, have forms for the same mnemonic that 4115 // do and don't have a cc_out optional-def operand. With some spot-checks 4116 // of the operand list, we can figure out which variant we're trying to 4117 // parse and adjust accordingly before actually matching. We shouldn't ever 4118 // try to remove a cc_out operand that was explicitly set on the the 4119 // mnemonic, of course (CarrySetting == true). Reason number #317 the 4120 // table driven matcher doesn't fit well with the ARM instruction set. 4121 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { 4122 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4123 Operands.erase(Operands.begin() + 1); 4124 delete Op; 4125 } 4126 4127 // ARM mode 'blx' need special handling, as the register operand version 4128 // is predicable, but the label operand version is not. So, we can't rely 4129 // on the Mnemonic based checking to correctly figure out when to put 4130 // a k_CondCode operand in the list. If we're trying to match the label 4131 // version, remove the k_CondCode operand here. 4132 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && 4133 static_cast<ARMOperand*>(Operands[2])->isImm()) { 4134 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4135 Operands.erase(Operands.begin() + 1); 4136 delete Op; 4137 } 4138 4139 // The vector-compare-to-zero instructions have a literal token "#0" at 4140 // the end that comes to here as an immediate operand. Convert it to a 4141 // token to play nicely with the matcher. 
4142 if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" || 4143 Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 && 4144 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4145 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4146 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4147 if (CE && CE->getValue() == 0) { 4148 Operands.erase(Operands.begin() + 5); 4149 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4150 delete Op; 4151 } 4152 } 4153 // VCMP{E} does the same thing, but with a different operand count. 4154 if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 && 4155 static_cast<ARMOperand*>(Operands[4])->isImm()) { 4156 ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]); 4157 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4158 if (CE && CE->getValue() == 0) { 4159 Operands.erase(Operands.begin() + 4); 4160 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4161 delete Op; 4162 } 4163 } 4164 // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the 4165 // end. Convert it to a token here. 4166 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 && 4167 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4168 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4169 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4170 if (CE && CE->getValue() == 0) { 4171 Operands.erase(Operands.begin() + 5); 4172 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4173 delete Op; 4174 } 4175 } 4176 4177 return false; 4178} 4179 4180// Validate context-sensitive operand constraints. 4181 4182// return 'true' if register list contains non-low GPR registers, 4183// 'false' otherwise. If Reg is in the register list or is HiReg, set 4184// 'containsReg' to true. 
4185static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4186 unsigned HiReg, bool &containsReg) { 4187 containsReg = false; 4188 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4189 unsigned OpReg = Inst.getOperand(i).getReg(); 4190 if (OpReg == Reg) 4191 containsReg = true; 4192 // Anything other than a low register isn't legal here. 4193 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4194 return true; 4195 } 4196 return false; 4197} 4198 4199// Check if the specified regisgter is in the register list of the inst, 4200// starting at the indicated operand number. 4201static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4202 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4203 unsigned OpReg = Inst.getOperand(i).getReg(); 4204 if (OpReg == Reg) 4205 return true; 4206 } 4207 return false; 4208} 4209 4210// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4211// the ARMInsts array) instead. Getting that here requires awkward 4212// API changes, though. Better way? 4213namespace llvm { 4214extern MCInstrDesc ARMInsts[]; 4215} 4216static MCInstrDesc &getInstDesc(unsigned Opcode) { 4217 return ARMInsts[Opcode]; 4218} 4219 4220// FIXME: We would really like to be able to tablegen'erate this. 4221bool ARMAsmParser:: 4222validateInstruction(MCInst &Inst, 4223 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4224 MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4225 SMLoc Loc = Operands[0]->getStartLoc(); 4226 // Check the IT block state first. 4227 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4228 // being allowed in IT blocks, but not being predicable. It just always 4229 // executes. 4230 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4231 unsigned bit = 1; 4232 if (ITState.FirstCond) 4233 ITState.FirstCond = false; 4234 else 4235 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4236 // The instruction must be predicable. 
4237 if (!MCID.isPredicable()) 4238 return Error(Loc, "instructions in IT block must be predicable"); 4239 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4240 unsigned ITCond = bit ? ITState.Cond : 4241 ARMCC::getOppositeCondition(ITState.Cond); 4242 if (Cond != ITCond) { 4243 // Find the condition code Operand to get its SMLoc information. 4244 SMLoc CondLoc; 4245 for (unsigned i = 1; i < Operands.size(); ++i) 4246 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4247 CondLoc = Operands[i]->getStartLoc(); 4248 return Error(CondLoc, "incorrect condition in IT block; got '" + 4249 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4250 "', but expected '" + 4251 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4252 } 4253 // Check for non-'al' condition codes outside of the IT block. 4254 } else if (isThumbTwo() && MCID.isPredicable() && 4255 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4256 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4257 Inst.getOpcode() != ARM::t2B) 4258 return Error(Loc, "predicated instructions must be in IT block"); 4259 4260 switch (Inst.getOpcode()) { 4261 case ARM::LDRD: 4262 case ARM::LDRD_PRE: 4263 case ARM::LDRD_POST: 4264 case ARM::LDREXD: { 4265 // Rt2 must be Rt + 1. 4266 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4267 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4268 if (Rt2 != Rt + 1) 4269 return Error(Operands[3]->getStartLoc(), 4270 "destination operands must be sequential"); 4271 return false; 4272 } 4273 case ARM::STRD: { 4274 // Rt2 must be Rt + 1. 
4275 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4276 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4277 if (Rt2 != Rt + 1) 4278 return Error(Operands[3]->getStartLoc(), 4279 "source operands must be sequential"); 4280 return false; 4281 } 4282 case ARM::STRD_PRE: 4283 case ARM::STRD_POST: 4284 case ARM::STREXD: { 4285 // Rt2 must be Rt + 1. 4286 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4287 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4288 if (Rt2 != Rt + 1) 4289 return Error(Operands[3]->getStartLoc(), 4290 "source operands must be sequential"); 4291 return false; 4292 } 4293 case ARM::SBFX: 4294 case ARM::UBFX: { 4295 // width must be in range [1, 32-lsb] 4296 unsigned lsb = Inst.getOperand(2).getImm(); 4297 unsigned widthm1 = Inst.getOperand(3).getImm(); 4298 if (widthm1 >= 32 - lsb) 4299 return Error(Operands[5]->getStartLoc(), 4300 "bitfield width must be in range [1,32-lsb]"); 4301 return false; 4302 } 4303 case ARM::tLDMIA: { 4304 // If we're parsing Thumb2, the .w variant is available and handles 4305 // most cases that are normally illegal for a Thumb1 LDM 4306 // instruction. We'll make the transformation in processInstruction() 4307 // if necessary. 4308 // 4309 // Thumb LDM instructions are writeback iff the base register is not 4310 // in the register list. 4311 unsigned Rn = Inst.getOperand(0).getReg(); 4312 bool hasWritebackToken = 4313 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4314 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4315 bool listContainsBase; 4316 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4317 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4318 "registers must be in range r0-r7"); 4319 // If we should have writeback, then there should be a '!' token. 
    // If the base register is not in the register list, the 16-bit Thumb1
    // encoding requires writeback, so the '!' token must be present.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    // Writeback is not allowed when the base register also appears in the
    // transfer list.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  case ARM::tPOP: {
    // Thumb1 POP accepts only low registers, plus PC.
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    // Thumb1 PUSH accepts only low registers, plus LR.
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase))
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    // High registers don't fit the 16-bit encoding. In Thumb2 a wide
    // encoding can be substituted later (see processInstruction), so only
    // diagnose here when we're limited to Thumb1.
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

// Post-process a successfully matched instruction: rewrite convenience
// mnemonics (single-register pop/push), switch between 16-bit and 32-bit
// Thumb encodings where the chosen one can't encode the operands, and
// record IT-block state when a 't2IT' is seen. Mutates Inst in place.
void ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4)); // Post-increment by 4.
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4)); // Pre-decrement by 4.
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tADDi3);
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tSUBi3);
    break;
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::tBcc);
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock())
      Inst.setOpcode(ARM::t2B);
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL)
      Inst.setOpcode(ARM::tB);
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
    }
    break;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
         Inst.getOperand(4).getReg() == ARM::CPSR) ||
        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == ARMCC::AL &&
        Inst.getOperand(4).getReg() == ARM::CPSR &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't the same for tMOV[S]r... (no cc_out)
      MCInst TmpInst;
      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2SXTH:
  case ARM::t2SXTB:
  case ARM::t2UXTH:
  case ARM::t2UXTB: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here. The 16-bit form is
    // only usable with a zero rotation (operand 2).
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == 0 &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("Illegal opcode!");
      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
      }
      // The operands aren't the same for thumb1 (no rotate operand).
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2IT: {
    // The mask bits for all but the first condition are represented as
    // the low bit of the condition code value implies 't'. We currently
    // always have 1 implies 't', so XOR toggle the bits if the low bit
    // of the condition code is zero. The encoding also expects the low
    // bit of the condition to be encoded as bit 4 of the mask operand,
    // so mask that in if needed
    MCOperand &MO = Inst.getOperand(1);
    unsigned Mask = MO.getImm();
    unsigned OrigMask = Mask;
    unsigned TZ = CountTrailingZeros_32(Mask);
    if ((Inst.getOperand(0).getImm() & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      // Flip every mask bit above the terminating '1' so that the encoded
      // bits match the inverted ('e') sense for an even condition code.
      for (unsigned i = 3; i != TZ; --i)
        Mask ^= 1 << i;
    } else
      Mask |= 0x10;
    MO.setImm(Mask);

    // Set up the IT block state according to the IT instruction we just
    // matched.
    assert(!inITBlock() && "nested IT blocks?!");
    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
    ITState.CurPosition = 0;
    ITState.FirstCond = true;
    break;
  }
  }
}

// Enforce match constraints that the table-generated matcher can't express,
// e.g. forms whose legality depends on IT-block state or the subtarget mode.
unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
4581 unsigned Opc = Inst.getOpcode(); 4582 MCInstrDesc &MCID = getInstDesc(Opc); 4583 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4584 assert(MCID.hasOptionalDef() && 4585 "optionally flag setting instruction missing optional def operand"); 4586 assert(MCID.NumOperands == Inst.getNumOperands() && 4587 "operand count mismatch!"); 4588 // Find the optional-def operand (cc_out). 4589 unsigned OpNo; 4590 for (OpNo = 0; 4591 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4592 ++OpNo) 4593 ; 4594 // If we're parsing Thumb1, reject it completely. 4595 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4596 return Match_MnemonicFail; 4597 // If we're parsing Thumb2, which form is legal depends on whether we're 4598 // in an IT block. 4599 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4600 !inITBlock()) 4601 return Match_RequiresITBlock; 4602 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4603 inITBlock()) 4604 return Match_RequiresNotITBlock; 4605 } 4606 // Some high-register supporting Thumb1 encodings only allow both registers 4607 // to be from r0-r7 when in Thumb2. 4608 else if (Opc == ARM::tADDhirr && isThumbOne() && 4609 isARMLowRegister(Inst.getOperand(1).getReg()) && 4610 isARMLowRegister(Inst.getOperand(2).getReg())) 4611 return Match_RequiresThumb2; 4612 // Others only require ARMv6 or later. 
4613 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4614 isARMLowRegister(Inst.getOperand(0).getReg()) && 4615 isARMLowRegister(Inst.getOperand(1).getReg())) 4616 return Match_RequiresV6; 4617 return Match_Success; 4618} 4619 4620bool ARMAsmParser:: 4621MatchAndEmitInstruction(SMLoc IDLoc, 4622 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4623 MCStreamer &Out) { 4624 MCInst Inst; 4625 unsigned ErrorInfo; 4626 unsigned MatchResult; 4627 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4628 switch (MatchResult) { 4629 default: break; 4630 case Match_Success: 4631 // Context sensitive operand constraints aren't handled by the matcher, 4632 // so check them here. 4633 if (validateInstruction(Inst, Operands)) { 4634 // Still progress the IT block, otherwise one wrong condition causes 4635 // nasty cascading errors. 4636 forwardITPosition(); 4637 return true; 4638 } 4639 4640 // Some instructions need post-processing to, for example, tweak which 4641 // encoding is selected. 4642 processInstruction(Inst, Operands); 4643 4644 // Only move forward at the very end so that everything in validate 4645 // and process gets a consistent answer about whether we're in an IT 4646 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher told
    // us which one it was; otherwise fall back to the instruction start.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  // Returning true lets the generic AsmParser try the directive itself.
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Emit each comma-separated expression as a Size-byte value.
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  // Consume the EndOfStatement token.
  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // TODO: set thumb mode
  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has function name after .thumb_func directive.
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getString();
    Parser.Lex(); // Consume the identifier token.
4754 } 4755 4756 if (getLexer().isNot(AsmToken::EndOfStatement)) 4757 return Error(L, "unexpected token in directive"); 4758 Parser.Lex(); 4759 4760 // FIXME: assuming function name will be the line following .thumb_func 4761 if (!isMachO) { 4762 Name = Parser.getTok().getString(); 4763 } 4764 4765 // Mark symbol as a thumb symbol. 4766 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4767 getParser().getStreamer().EmitThumbFunc(Func); 4768 return false; 4769} 4770 4771/// parseDirectiveSyntax 4772/// ::= .syntax unified | divided 4773bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4774 const AsmToken &Tok = Parser.getTok(); 4775 if (Tok.isNot(AsmToken::Identifier)) 4776 return Error(L, "unexpected token in .syntax directive"); 4777 StringRef Mode = Tok.getString(); 4778 if (Mode == "unified" || Mode == "UNIFIED") 4779 Parser.Lex(); 4780 else if (Mode == "divided" || Mode == "DIVIDED") 4781 return Error(L, "'.syntax divided' arm asssembly not supported"); 4782 else 4783 return Error(L, "unrecognized syntax mode in .syntax directive"); 4784 4785 if (getLexer().isNot(AsmToken::EndOfStatement)) 4786 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4787 Parser.Lex(); 4788 4789 // TODO tell the MC streamer the mode 4790 // getParser().getStreamer().Emit???(); 4791 return false; 4792} 4793 4794/// parseDirectiveCode 4795/// ::= .code 16 | 32 4796bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4797 const AsmToken &Tok = Parser.getTok(); 4798 if (Tok.isNot(AsmToken::Integer)) 4799 return Error(L, "unexpected token in .code directive"); 4800 int64_t Val = Parser.getTok().getIntVal(); 4801 if (Val == 16) 4802 Parser.Lex(); 4803 else if (Val == 32) 4804 Parser.Lex(); 4805 else 4806 return Error(L, "invalid operand to .code directive"); 4807 4808 if (getLexer().isNot(AsmToken::EndOfStatement)) 4809 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4810 Parser.Lex(); 4811 4812 if (Val == 16) { 4813 if 
(!isThumb()) 4814 SwitchMode(); 4815 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4816 } else { 4817 if (isThumb()) 4818 SwitchMode(); 4819 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4820 } 4821 4822 return false; 4823} 4824 4825extern "C" void LLVMInitializeARMAsmLexer(); 4826 4827/// Force static initialization. 4828extern "C" void LLVMInitializeARMAsmParser() { 4829 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4830 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4831 LLVMInitializeARMAsmLexer(); 4832} 4833 4834#define GET_REGISTER_MATCHER 4835#define GET_MATCHER_IMPLEMENTATION 4836#include "ARMGenAsmMatcher.inc" 4837