ARMAsmParser.cpp revision 7784f1d2d8b76a7eb9dd9b3fef7213770605532d
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringExtras.h" 34#include "llvm/ADT/StringSwitch.h" 35#include "llvm/ADT/Twine.h" 36 37using namespace llvm; 38 39namespace { 40 41class ARMOperand; 42 43class ARMAsmParser : public MCTargetAsmParser { 44 MCSubtargetInfo &STI; 45 MCAsmParser &Parser; 46 47 struct { 48 ARMCC::CondCodes Cond; // Condition for IT block. 49 unsigned Mask:4; // Condition mask for instructions. 50 // Starting at first 1 (from lsb). 51 // '1' condition as indicated in IT. 52 // '0' inverse of condition (else). 53 // Count of instructions in IT block is 54 // 4 - trailingzeroes(mask) 55 56 bool FirstCond; // Explicit flag for when we're parsing the 57 // First instruction in the IT block. It's 58 // implied in the mask, so needs special 59 // handling. 60 61 unsigned CurPosition; // Current position in parsing of IT 62 // block. In range [0,3]. Initialized 63 // according to count of instructions in block. 64 // ~0U if no active IT block. 65 } ITState; 66 bool inITBlock() { return ITState.CurPosition != ~0U;} 67 void forwardITPosition() { 68 if (!inITBlock()) return; 69 // Move to the next instruction in the IT block, if there is one. If not, 70 // mark the block as done. 71 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 72 if (++ITState.CurPosition == 5 - TZ) 73 ITState.CurPosition = ~0U; // Done with the IT block after this. 
74 } 75 76 77 MCAsmParser &getParser() const { return Parser; } 78 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 79 80 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 81 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 82 83 int tryParseRegister(); 84 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 85 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 86 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 87 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 88 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 89 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 90 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 91 unsigned &ShiftAmount); 92 bool parseDirectiveWord(unsigned Size, SMLoc L); 93 bool parseDirectiveThumb(SMLoc L); 94 bool parseDirectiveThumbFunc(SMLoc L); 95 bool parseDirectiveCode(SMLoc L); 96 bool parseDirectiveSyntax(SMLoc L); 97 98 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 99 bool &CarrySetting, unsigned &ProcessorIMod, 100 StringRef &ITMask); 101 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 102 bool &CanAcceptPredicationCode); 103 104 bool isThumb() const { 105 // FIXME: Can tablegen auto-generate this? 106 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 107 } 108 bool isThumbOne() const { 109 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 110 } 111 bool isThumbTwo() const { 112 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 113 } 114 bool hasV6Ops() const { 115 return STI.getFeatureBits() & ARM::HasV6Ops; 116 } 117 bool hasV7Ops() const { 118 return STI.getFeatureBits() & ARM::HasV7Ops; 119 } 120 void SwitchMode() { 121 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 122 setAvailableFeatures(FB); 123 } 124 bool isMClass() const { 125 return STI.getFeatureBits() & ARM::FeatureMClass; 126 } 127 128 /// @name Auto-generated Match Functions 129 /// { 130 131#define GET_ASSEMBLER_HEADER 132#include "ARMGenAsmMatcher.inc" 133 134 /// } 135 136 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 137 OperandMatchResultTy parseCoprocNumOperand( 138 SmallVectorImpl<MCParsedAsmOperand*>&); 139 OperandMatchResultTy parseCoprocRegOperand( 140 SmallVectorImpl<MCParsedAsmOperand*>&); 141 OperandMatchResultTy parseCoprocOptionOperand( 142 SmallVectorImpl<MCParsedAsmOperand*>&); 143 OperandMatchResultTy parseMemBarrierOptOperand( 144 SmallVectorImpl<MCParsedAsmOperand*>&); 145 OperandMatchResultTy parseProcIFlagsOperand( 146 SmallVectorImpl<MCParsedAsmOperand*>&); 147 OperandMatchResultTy parseMSRMaskOperand( 148 SmallVectorImpl<MCParsedAsmOperand*>&); 149 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 150 StringRef Op, int Low, int High); 151 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 152 return parsePKHImm(O, "lsl", 0, 31); 153 } 154 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 155 return parsePKHImm(O, "asr", 1, 32); 156 } 157 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 158 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 159 OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 160 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 161 OperandMatchResultTy 
parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 162 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 163 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 164 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 165 166 // Asm Match Converter Methods 167 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 168 const SmallVectorImpl<MCParsedAsmOperand*> &); 169 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 170 const SmallVectorImpl<MCParsedAsmOperand*> &); 171 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 172 const SmallVectorImpl<MCParsedAsmOperand*> &); 173 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 174 const SmallVectorImpl<MCParsedAsmOperand*> &); 175 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 176 const SmallVectorImpl<MCParsedAsmOperand*> &); 177 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 178 const SmallVectorImpl<MCParsedAsmOperand*> &); 179 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 180 const SmallVectorImpl<MCParsedAsmOperand*> &); 181 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 182 const SmallVectorImpl<MCParsedAsmOperand*> &); 183 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 184 const SmallVectorImpl<MCParsedAsmOperand*> &); 185 bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 186 const SmallVectorImpl<MCParsedAsmOperand*> &); 187 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 188 const SmallVectorImpl<MCParsedAsmOperand*> &); 189 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 190 const SmallVectorImpl<MCParsedAsmOperand*> &); 191 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 192 const SmallVectorImpl<MCParsedAsmOperand*> &); 193 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 194 const SmallVectorImpl<MCParsedAsmOperand*> &); 195 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 196 const SmallVectorImpl<MCParsedAsmOperand*> &); 197 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 198 const SmallVectorImpl<MCParsedAsmOperand*> &); 199 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 200 const SmallVectorImpl<MCParsedAsmOperand*> &); 201 202 bool validateInstruction(MCInst &Inst, 203 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 204 void processInstruction(MCInst &Inst, 205 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 206 bool shouldOmitCCOutOperand(StringRef Mnemonic, 207 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 208 209public: 210 enum ARMMatchResultTy { 211 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 212 Match_RequiresNotITBlock, 213 Match_RequiresV6, 214 Match_RequiresThumb2 215 }; 216 217 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 218 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 219 MCAsmParserExtension::Initialize(_Parser); 220 221 // Initialize the set of available features. 222 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 223 224 // Not in an ITBlock to start with. 
225 ITState.CurPosition = ~0U; 226 } 227 228 // Implementation of the MCTargetAsmParser interface: 229 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 230 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 231 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 232 bool ParseDirective(AsmToken DirectiveID); 233 234 unsigned checkTargetMatchPredicate(MCInst &Inst); 235 236 bool MatchAndEmitInstruction(SMLoc IDLoc, 237 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 238 MCStreamer &Out); 239}; 240} // end anonymous namespace 241 242namespace { 243 244/// ARMOperand - Instances of this class represent a parsed ARM machine 245/// instruction. 246class ARMOperand : public MCParsedAsmOperand { 247 enum KindTy { 248 k_CondCode, 249 k_CCOut, 250 k_ITCondMask, 251 k_CoprocNum, 252 k_CoprocReg, 253 k_CoprocOption, 254 k_Immediate, 255 k_FPImmediate, 256 k_MemBarrierOpt, 257 k_Memory, 258 k_PostIndexRegister, 259 k_MSRMask, 260 k_ProcIFlags, 261 k_VectorIndex, 262 k_Register, 263 k_RegisterList, 264 k_DPRRegisterList, 265 k_SPRRegisterList, 266 k_VectorList, 267 k_ShiftedRegister, 268 k_ShiftedImmediate, 269 k_ShifterImmediate, 270 k_RotateImmediate, 271 k_BitfieldDescriptor, 272 k_Token 273 } Kind; 274 275 SMLoc StartLoc, EndLoc; 276 SmallVector<unsigned, 8> Registers; 277 278 union { 279 struct { 280 ARMCC::CondCodes Val; 281 } CC; 282 283 struct { 284 unsigned Val; 285 } Cop; 286 287 struct { 288 unsigned Val; 289 } CoprocOption; 290 291 struct { 292 unsigned Mask:4; 293 } ITMask; 294 295 struct { 296 ARM_MB::MemBOpt Val; 297 } MBOpt; 298 299 struct { 300 ARM_PROC::IFlags Val; 301 } IFlags; 302 303 struct { 304 unsigned Val; 305 } MMask; 306 307 struct { 308 const char *Data; 309 unsigned Length; 310 } Tok; 311 312 struct { 313 unsigned RegNum; 314 } Reg; 315 316 // A vector register list is a sequential list of 1 to 4 registers. 317 struct { 318 unsigned RegNum; 319 unsigned Count; 320 } VectorList; 321 322 struct { 323 unsigned Val; 324 } VectorIndex; 325 326 struct { 327 const MCExpr *Val; 328 } Imm; 329 330 struct { 331 unsigned Val; // encoded 8-bit representation 332 } FPImm; 333 334 /// Combined record for all forms of ARM address expressions. 335 struct { 336 unsigned BaseRegNum; 337 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 338 // was specified. 339 const MCConstantExpr *OffsetImm; // Offset immediate value 340 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 341 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 342 unsigned ShiftImm; // shift for OffsetReg. 343 unsigned Alignment; // 0 = no alignment specified 344 // n = alignment in bytes (8, 16, or 32) 345 unsigned isNegative : 1; // Negated OffsetReg? 
(~'U' bit) 346 } Memory; 347 348 struct { 349 unsigned RegNum; 350 bool isAdd; 351 ARM_AM::ShiftOpc ShiftTy; 352 unsigned ShiftImm; 353 } PostIdxReg; 354 355 struct { 356 bool isASR; 357 unsigned Imm; 358 } ShifterImm; 359 struct { 360 ARM_AM::ShiftOpc ShiftTy; 361 unsigned SrcReg; 362 unsigned ShiftReg; 363 unsigned ShiftImm; 364 } RegShiftedReg; 365 struct { 366 ARM_AM::ShiftOpc ShiftTy; 367 unsigned SrcReg; 368 unsigned ShiftImm; 369 } RegShiftedImm; 370 struct { 371 unsigned Imm; 372 } RotImm; 373 struct { 374 unsigned LSB; 375 unsigned Width; 376 } Bitfield; 377 }; 378 379 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 380public: 381 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 382 Kind = o.Kind; 383 StartLoc = o.StartLoc; 384 EndLoc = o.EndLoc; 385 switch (Kind) { 386 case k_CondCode: 387 CC = o.CC; 388 break; 389 case k_ITCondMask: 390 ITMask = o.ITMask; 391 break; 392 case k_Token: 393 Tok = o.Tok; 394 break; 395 case k_CCOut: 396 case k_Register: 397 Reg = o.Reg; 398 break; 399 case k_RegisterList: 400 case k_DPRRegisterList: 401 case k_SPRRegisterList: 402 Registers = o.Registers; 403 break; 404 case k_VectorList: 405 VectorList = o.VectorList; 406 break; 407 case k_CoprocNum: 408 case k_CoprocReg: 409 Cop = o.Cop; 410 break; 411 case k_CoprocOption: 412 CoprocOption = o.CoprocOption; 413 break; 414 case k_Immediate: 415 Imm = o.Imm; 416 break; 417 case k_FPImmediate: 418 FPImm = o.FPImm; 419 break; 420 case k_MemBarrierOpt: 421 MBOpt = o.MBOpt; 422 break; 423 case k_Memory: 424 Memory = o.Memory; 425 break; 426 case k_PostIndexRegister: 427 PostIdxReg = o.PostIdxReg; 428 break; 429 case k_MSRMask: 430 MMask = o.MMask; 431 break; 432 case k_ProcIFlags: 433 IFlags = o.IFlags; 434 break; 435 case k_ShifterImmediate: 436 ShifterImm = o.ShifterImm; 437 break; 438 case k_ShiftedRegister: 439 RegShiftedReg = o.RegShiftedReg; 440 break; 441 case k_ShiftedImmediate: 442 RegShiftedImm = o.RegShiftedImm; 443 break; 444 case k_RotateImmediate: 445 RotImm = o.RotImm; 446 break; 447 case k_BitfieldDescriptor: 448 Bitfield = o.Bitfield; 449 break; 450 case k_VectorIndex: 451 VectorIndex = o.VectorIndex; 452 break; 453 } 454 } 455 456 /// getStartLoc - Get the location of the first token of this operand. 457 SMLoc getStartLoc() const { return StartLoc; } 458 /// getEndLoc - Get the location of the last token of this operand. 
459 SMLoc getEndLoc() const { return EndLoc; } 460 461 ARMCC::CondCodes getCondCode() const { 462 assert(Kind == k_CondCode && "Invalid access!"); 463 return CC.Val; 464 } 465 466 unsigned getCoproc() const { 467 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 468 return Cop.Val; 469 } 470 471 StringRef getToken() const { 472 assert(Kind == k_Token && "Invalid access!"); 473 return StringRef(Tok.Data, Tok.Length); 474 } 475 476 unsigned getReg() const { 477 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 478 return Reg.RegNum; 479 } 480 481 const SmallVectorImpl<unsigned> &getRegList() const { 482 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 483 Kind == k_SPRRegisterList) && "Invalid access!"); 484 return Registers; 485 } 486 487 const MCExpr *getImm() const { 488 assert(Kind == k_Immediate && "Invalid access!"); 489 return Imm.Val; 490 } 491 492 unsigned getFPImm() const { 493 assert(Kind == k_FPImmediate && "Invalid access!"); 494 return FPImm.Val; 495 } 496 497 unsigned getVectorIndex() const { 498 assert(Kind == k_VectorIndex && "Invalid access!"); 499 return VectorIndex.Val; 500 } 501 502 ARM_MB::MemBOpt getMemBarrierOpt() const { 503 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 504 return MBOpt.Val; 505 } 506 507 ARM_PROC::IFlags getProcIFlags() const { 508 assert(Kind == k_ProcIFlags && "Invalid access!"); 509 return IFlags.Val; 510 } 511 512 unsigned getMSRMask() const { 513 assert(Kind == k_MSRMask && "Invalid access!"); 514 return MMask.Val; 515 } 516 517 bool isCoprocNum() const { return Kind == k_CoprocNum; } 518 bool isCoprocReg() const { return Kind == k_CoprocReg; } 519 bool isCoprocOption() const { return Kind == k_CoprocOption; } 520 bool isCondCode() const { return Kind == k_CondCode; } 521 bool isCCOut() const { return Kind == k_CCOut; } 522 bool isITMask() const { return Kind == k_ITCondMask; } 523 bool isITCondCode() const { return Kind == k_CondCode; } 524 bool isImm() const { return Kind == k_Immediate; } 525 bool isFPImm() const { return Kind == k_FPImmediate; } 526 bool isImm8s4() const { 527 if (Kind != k_Immediate) 528 return false; 529 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 530 if (!CE) return false; 531 int64_t Value = CE->getValue(); 532 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 533 } 534 bool isImm0_1020s4() const { 535 if (Kind != k_Immediate) 536 return false; 537 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 538 if (!CE) return false; 539 int64_t Value = CE->getValue(); 540 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 541 } 542 bool isImm0_508s4() const { 543 if (Kind != k_Immediate) 544 return false; 545 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 546 if (!CE) return false; 547 int64_t Value = CE->getValue(); 548 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 549 } 550 bool isImm0_255() const { 551 if (Kind != k_Immediate) 552 return false; 553 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 554 if (!CE) return false; 555 int64_t Value = CE->getValue(); 556 return Value >= 0 && Value < 256; 557 } 558 bool isImm0_7() const { 559 if (Kind != k_Immediate) 560 return false; 561 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 562 if (!CE) return false; 563 int64_t Value = CE->getValue(); 564 return Value >= 0 && Value < 8; 565 } 566 bool isImm0_15() const { 567 if (Kind != k_Immediate) 568 return false; 569 const MCConstantExpr *CE = 
dyn_cast<MCConstantExpr>(getImm()); 570 if (!CE) return false; 571 int64_t Value = CE->getValue(); 572 return Value >= 0 && Value < 16; 573 } 574 bool isImm0_31() const { 575 if (Kind != k_Immediate) 576 return false; 577 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 578 if (!CE) return false; 579 int64_t Value = CE->getValue(); 580 return Value >= 0 && Value < 32; 581 } 582 bool isImm1_16() const { 583 if (Kind != k_Immediate) 584 return false; 585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 586 if (!CE) return false; 587 int64_t Value = CE->getValue(); 588 return Value > 0 && Value < 17; 589 } 590 bool isImm1_32() const { 591 if (Kind != k_Immediate) 592 return false; 593 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 594 if (!CE) return false; 595 int64_t Value = CE->getValue(); 596 return Value > 0 && Value < 33; 597 } 598 bool isImm0_65535() const { 599 if (Kind != k_Immediate) 600 return false; 601 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 602 if (!CE) return false; 603 int64_t Value = CE->getValue(); 604 return Value >= 0 && Value < 65536; 605 } 606 bool isImm0_65535Expr() const { 607 if (Kind != k_Immediate) 608 return false; 609 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 610 // If it's not a constant expression, it'll generate a fixup and be 611 // handled later. 612 if (!CE) return true; 613 int64_t Value = CE->getValue(); 614 return Value >= 0 && Value < 65536; 615 } 616 bool isImm24bit() const { 617 if (Kind != k_Immediate) 618 return false; 619 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 620 if (!CE) return false; 621 int64_t Value = CE->getValue(); 622 return Value >= 0 && Value <= 0xffffff; 623 } 624 bool isImmThumbSR() const { 625 if (Kind != k_Immediate) 626 return false; 627 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 628 if (!CE) return false; 629 int64_t Value = CE->getValue(); 630 return Value > 0 && Value < 33; 631 } 632 bool isPKHLSLImm() const { 633 if (Kind != k_Immediate) 634 return false; 635 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 636 if (!CE) return false; 637 int64_t Value = CE->getValue(); 638 return Value >= 0 && Value < 32; 639 } 640 bool isPKHASRImm() const { 641 if (Kind != k_Immediate) 642 return false; 643 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 644 if (!CE) return false; 645 int64_t Value = CE->getValue(); 646 return Value > 0 && Value <= 32; 647 } 648 bool isARMSOImm() const { 649 if (Kind != k_Immediate) 650 return false; 651 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 652 if (!CE) return false; 653 int64_t Value = CE->getValue(); 654 return ARM_AM::getSOImmVal(Value) != -1; 655 } 656 bool isT2SOImm() const { 657 if (Kind != k_Immediate) 658 return false; 659 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 660 if (!CE) return false; 661 int64_t Value = CE->getValue(); 662 return ARM_AM::getT2SOImmVal(Value) != -1; 663 } 664 bool isSetEndImm() const { 665 if (Kind != k_Immediate) 666 return false; 667 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 668 if (!CE) return false; 669 int64_t Value = CE->getValue(); 670 return Value == 1 || Value == 0; 671 } 672 bool isReg() const { return Kind == k_Register; } 673 bool isRegList() const { return Kind == k_RegisterList; } 674 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 675 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 676 bool isToken() const { 
return Kind == k_Token; } 677 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 678 bool isMemory() const { return Kind == k_Memory; } 679 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 680 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 681 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 682 bool isRotImm() const { return Kind == k_RotateImmediate; } 683 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 684 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 685 bool isPostIdxReg() const { 686 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift; 687 } 688 bool isMemNoOffset(bool alignOK = false) const { 689 if (!isMemory()) 690 return false; 691 // No offset of any kind. 692 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 693 (alignOK || Memory.Alignment == 0); 694 } 695 bool isAlignedMemory() const { 696 return isMemNoOffset(true); 697 } 698 bool isAddrMode2() const { 699 if (!isMemory() || Memory.Alignment != 0) return false; 700 // Check for register offset. 701 if (Memory.OffsetRegNum) return true; 702 // Immediate offset in range [-4095, 4095]. 703 if (!Memory.OffsetImm) return true; 704 int64_t Val = Memory.OffsetImm->getValue(); 705 return Val > -4096 && Val < 4096; 706 } 707 bool isAM2OffsetImm() const { 708 if (Kind != k_Immediate) 709 return false; 710 // Immediate offset in range [-4095, 4095]. 711 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 712 if (!CE) return false; 713 int64_t Val = CE->getValue(); 714 return Val > -4096 && Val < 4096; 715 } 716 bool isAddrMode3() const { 717 if (!isMemory() || Memory.Alignment != 0) return false; 718 // No shifts are legal for AM3. 719 if (Memory.ShiftType != ARM_AM::no_shift) return false; 720 // Check for register offset. 721 if (Memory.OffsetRegNum) return true; 722 // Immediate offset in range [-255, 255]. 723 if (!Memory.OffsetImm) return true; 724 int64_t Val = Memory.OffsetImm->getValue(); 725 return Val > -256 && Val < 256; 726 } 727 bool isAM3Offset() const { 728 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 729 return false; 730 if (Kind == k_PostIndexRegister) 731 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 732 // Immediate offset in range [-255, 255]. 733 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 734 if (!CE) return false; 735 int64_t Val = CE->getValue(); 736 // Special case, #-0 is INT32_MIN. 737 return (Val > -256 && Val < 256) || Val == INT32_MIN; 738 } 739 bool isAddrMode5() const { 740 if (!isMemory() || Memory.Alignment != 0) return false; 741 // Check for register offset. 742 if (Memory.OffsetRegNum) return false; 743 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
744 if (!Memory.OffsetImm) return true; 745 int64_t Val = Memory.OffsetImm->getValue(); 746 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 747 Val == INT32_MIN; 748 } 749 bool isMemTBB() const { 750 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 751 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 752 return false; 753 return true; 754 } 755 bool isMemTBH() const { 756 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 757 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 758 Memory.Alignment != 0 ) 759 return false; 760 return true; 761 } 762 bool isMemRegOffset() const { 763 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 764 return false; 765 return true; 766 } 767 bool isT2MemRegOffset() const { 768 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 769 Memory.Alignment != 0) 770 return false; 771 // Only lsl #{0, 1, 2, 3} allowed. 772 if (Memory.ShiftType == ARM_AM::no_shift) 773 return true; 774 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 775 return false; 776 return true; 777 } 778 bool isMemThumbRR() const { 779 // Thumb reg+reg addressing is simple. Just two registers, a base and 780 // an offset. No shifts, negations or any other complicating factors. 781 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 782 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 783 return false; 784 return isARMLowRegister(Memory.BaseRegNum) && 785 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 786 } 787 bool isMemThumbRIs4() const { 788 if (!isMemory() || Memory.OffsetRegNum != 0 || 789 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 790 return false; 791 // Immediate offset, multiple of 4 in range [0, 124]. 792 if (!Memory.OffsetImm) return true; 793 int64_t Val = Memory.OffsetImm->getValue(); 794 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 795 } 796 bool isMemThumbRIs2() const { 797 if (!isMemory() || Memory.OffsetRegNum != 0 || 798 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 799 return false; 800 // Immediate offset, multiple of 4 in range [0, 62]. 801 if (!Memory.OffsetImm) return true; 802 int64_t Val = Memory.OffsetImm->getValue(); 803 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 804 } 805 bool isMemThumbRIs1() const { 806 if (!isMemory() || Memory.OffsetRegNum != 0 || 807 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 808 return false; 809 // Immediate offset in range [0, 31]. 810 if (!Memory.OffsetImm) return true; 811 int64_t Val = Memory.OffsetImm->getValue(); 812 return Val >= 0 && Val <= 31; 813 } 814 bool isMemThumbSPI() const { 815 if (!isMemory() || Memory.OffsetRegNum != 0 || 816 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 817 return false; 818 // Immediate offset, multiple of 4 in range [0, 1020]. 819 if (!Memory.OffsetImm) return true; 820 int64_t Val = Memory.OffsetImm->getValue(); 821 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 822 } 823 bool isMemImm8s4Offset() const { 824 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 825 return false; 826 // Immediate offset a multiple of 4 in range [-1020, 1020]. 
827 if (!Memory.OffsetImm) return true; 828 int64_t Val = Memory.OffsetImm->getValue(); 829 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 830 } 831 bool isMemImm0_1020s4Offset() const { 832 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 833 return false; 834 // Immediate offset a multiple of 4 in range [0, 1020]. 835 if (!Memory.OffsetImm) return true; 836 int64_t Val = Memory.OffsetImm->getValue(); 837 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 838 } 839 bool isMemImm8Offset() const { 840 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 841 return false; 842 // Immediate offset in range [-255, 255]. 843 if (!Memory.OffsetImm) return true; 844 int64_t Val = Memory.OffsetImm->getValue(); 845 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 846 } 847 bool isMemPosImm8Offset() const { 848 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 849 return false; 850 // Immediate offset in range [0, 255]. 851 if (!Memory.OffsetImm) return true; 852 int64_t Val = Memory.OffsetImm->getValue(); 853 return Val >= 0 && Val < 256; 854 } 855 bool isMemNegImm8Offset() const { 856 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 857 return false; 858 // Immediate offset in range [-255, -1]. 859 if (!Memory.OffsetImm) return true; 860 int64_t Val = Memory.OffsetImm->getValue(); 861 return Val > -256 && Val < 0; 862 } 863 bool isMemUImm12Offset() const { 864 // If we have an immediate that's not a constant, treat it as a label 865 // reference needing a fixup. If it is a constant, it's something else 866 // and we reject it. 867 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 868 return true; 869 870 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 871 return false; 872 // Immediate offset in range [0, 4095]. 873 if (!Memory.OffsetImm) return true; 874 int64_t Val = Memory.OffsetImm->getValue(); 875 return (Val >= 0 && Val < 4096); 876 } 877 bool isMemImm12Offset() const { 878 // If we have an immediate that's not a constant, treat it as a label 879 // reference needing a fixup. If it is a constant, it's something else 880 // and we reject it. 881 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 882 return true; 883 884 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 885 return false; 886 // Immediate offset in range [-4095, 4095]. 887 if (!Memory.OffsetImm) return true; 888 int64_t Val = Memory.OffsetImm->getValue(); 889 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 890 } 891 bool isPostIdxImm8() const { 892 if (Kind != k_Immediate) 893 return false; 894 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 895 if (!CE) return false; 896 int64_t Val = CE->getValue(); 897 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 898 } 899 bool isPostIdxImm8s4() const { 900 if (Kind != k_Immediate) 901 return false; 902 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 903 if (!CE) return false; 904 int64_t Val = CE->getValue(); 905 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 906 (Val == INT32_MIN); 907 } 908 909 bool isMSRMask() const { return Kind == k_MSRMask; } 910 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 911 912 // NEON operands. 
913 bool isVecListOneD() const { 914 if (Kind != k_VectorList) return false; 915 return VectorList.Count == 1; 916 } 917 918 bool isVectorIndex8() const { 919 if (Kind != k_VectorIndex) return false; 920 return VectorIndex.Val < 8; 921 } 922 bool isVectorIndex16() const { 923 if (Kind != k_VectorIndex) return false; 924 return VectorIndex.Val < 4; 925 } 926 bool isVectorIndex32() const { 927 if (Kind != k_VectorIndex) return false; 928 return VectorIndex.Val < 2; 929 } 930 931 bool isNEONi8splat() const { 932 if (Kind != k_Immediate) 933 return false; 934 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 935 // Must be a constant. 936 if (!CE) return false; 937 int64_t Value = CE->getValue(); 938 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 939 // value. 940 return Value >= 0 && Value < 256; 941 } 942 943 bool isNEONi16splat() const { 944 if (Kind != k_Immediate) 945 return false; 946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 947 // Must be a constant. 948 if (!CE) return false; 949 int64_t Value = CE->getValue(); 950 // i16 value in the range [0,255] or [0x0100, 0xff00] 951 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 952 } 953 954 bool isNEONi32splat() const { 955 if (Kind != k_Immediate) 956 return false; 957 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 958 // Must be a constant. 959 if (!CE) return false; 960 int64_t Value = CE->getValue(); 961 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 962 return (Value >= 0 && Value < 256) || 963 (Value >= 0x0100 && Value <= 0xff00) || 964 (Value >= 0x010000 && Value <= 0xff0000) || 965 (Value >= 0x01000000 && Value <= 0xff000000); 966 } 967 968 bool isNEONi32vmov() const { 969 if (Kind != k_Immediate) 970 return false; 971 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 972 // Must be a constant. 973 if (!CE) return false; 974 int64_t Value = CE->getValue(); 975 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 976 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 977 return (Value >= 0 && Value < 256) || 978 (Value >= 0x0100 && Value <= 0xff00) || 979 (Value >= 0x010000 && Value <= 0xff0000) || 980 (Value >= 0x01000000 && Value <= 0xff000000) || 981 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 982 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 983 } 984 985 bool isNEONi64splat() const { 986 if (Kind != k_Immediate) 987 return false; 988 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 989 // Must be a constant. 990 if (!CE) return false; 991 uint64_t Value = CE->getValue(); 992 // i64 value with each byte being either 0 or 0xff. 993 for (unsigned i = 0; i < 8; ++i) 994 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 995 return true; 996 } 997 998 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 999 // Add as immediates when possible. Null MCExpr = 0. 1000 if (Expr == 0) 1001 Inst.addOperand(MCOperand::CreateImm(0)); 1002 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1003 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1004 else 1005 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1006 } 1007 1008 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1009 assert(N == 2 && "Invalid number of operands!"); 1010 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1011 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 1012 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1013 } 1014 1015 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1016 assert(N == 1 && "Invalid number of operands!"); 1017 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1018 } 1019 1020 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1021 assert(N == 1 && "Invalid number of operands!"); 1022 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1023 } 1024 1025 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1026 assert(N == 1 && "Invalid number of operands!"); 1027 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1028 } 1029 1030 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1031 assert(N == 1 && "Invalid number of operands!"); 1032 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1033 } 1034 1035 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1036 assert(N == 1 && "Invalid number of operands!"); 1037 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1038 } 1039 1040 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1041 assert(N == 1 && "Invalid number of operands!"); 1042 Inst.addOperand(MCOperand::CreateReg(getReg())); 1043 } 1044 1045 void addRegOperands(MCInst &Inst, unsigned N) const { 1046 assert(N == 1 && "Invalid number of operands!"); 1047 Inst.addOperand(MCOperand::CreateReg(getReg())); 1048 } 1049 1050 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1051 assert(N == 3 && "Invalid number of operands!"); 1052 assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!"); 1053 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1054 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1055 Inst.addOperand(MCOperand::CreateImm( 1056 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1057 } 1058 1059 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1060 assert(N == 2 && "Invalid number of operands!"); 1061 assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!"); 1062 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1063 Inst.addOperand(MCOperand::CreateImm( 1064 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1065 } 1066 1067 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1068 assert(N == 1 && "Invalid number of operands!"); 1069 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1070 ShifterImm.Imm)); 1071 } 1072 1073 void addRegListOperands(MCInst &Inst, unsigned N) const { 1074 assert(N == 1 && "Invalid number of operands!"); 1075 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1076 for (SmallVectorImpl<unsigned>::const_iterator 1077 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1078 Inst.addOperand(MCOperand::CreateReg(*I)); 1079 } 1080 1081 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1082 addRegListOperands(Inst, N); 1083 } 1084 1085 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1086 addRegListOperands(Inst, N); 1087 } 1088 1089 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1090 assert(N == 1 && "Invalid number of operands!"); 1091 // Encoded as val>>3. The printer handles display as 8, 16, 24. 1092 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1093 } 1094 1095 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1096 assert(N == 1 && "Invalid number of operands!"); 1097 // Munge the lsb/width into a bitfield mask. 
1098 unsigned lsb = Bitfield.LSB; 1099 unsigned width = Bitfield.Width; 1100 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 1101 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1102 (32 - (lsb + width))); 1103 Inst.addOperand(MCOperand::CreateImm(Mask)); 1104 } 1105 1106 void addImmOperands(MCInst &Inst, unsigned N) const { 1107 assert(N == 1 && "Invalid number of operands!"); 1108 addExpr(Inst, getImm()); 1109 } 1110 1111 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1112 assert(N == 1 && "Invalid number of operands!"); 1113 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1114 } 1115 1116 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1117 assert(N == 1 && "Invalid number of operands!"); 1118 // FIXME: We really want to scale the value here, but the LDRD/STRD 1119 // instruction don't encode operands that way yet. 1120 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1121 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1122 } 1123 1124 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1125 assert(N == 1 && "Invalid number of operands!"); 1126 // The immediate is scaled by four in the encoding and is stored 1127 // in the MCInst as such. Lop off the low two bits here. 1128 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1129 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1130 } 1131 1132 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1133 assert(N == 1 && "Invalid number of operands!"); 1134 // The immediate is scaled by four in the encoding and is stored 1135 // in the MCInst as such. Lop off the low two bits here. 1136 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1137 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1138 } 1139 1140 void addImm0_255Operands(MCInst &Inst, unsigned N) const { 1141 assert(N == 1 && "Invalid number of operands!"); 1142 addExpr(Inst, getImm()); 1143 } 1144 1145 void addImm0_7Operands(MCInst &Inst, unsigned N) const { 1146 assert(N == 1 && "Invalid number of operands!"); 1147 addExpr(Inst, getImm()); 1148 } 1149 1150 void addImm0_15Operands(MCInst &Inst, unsigned N) const { 1151 assert(N == 1 && "Invalid number of operands!"); 1152 addExpr(Inst, getImm()); 1153 } 1154 1155 void addImm0_31Operands(MCInst &Inst, unsigned N) const { 1156 assert(N == 1 && "Invalid number of operands!"); 1157 addExpr(Inst, getImm()); 1158 } 1159 1160 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1161 assert(N == 1 && "Invalid number of operands!"); 1162 // The constant encodes as the immediate-1, and we store in the instruction 1163 // the bits as encoded, so subtract off one here. 1164 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1165 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1166 } 1167 1168 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1169 assert(N == 1 && "Invalid number of operands!"); 1170 // The constant encodes as the immediate-1, and we store in the instruction 1171 // the bits as encoded, so subtract off one here. 
1172 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1173 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1174 } 1175 1176 void addImm0_65535Operands(MCInst &Inst, unsigned N) const { 1177 assert(N == 1 && "Invalid number of operands!"); 1178 addExpr(Inst, getImm()); 1179 } 1180 1181 void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const { 1182 assert(N == 1 && "Invalid number of operands!"); 1183 addExpr(Inst, getImm()); 1184 } 1185 1186 void addImm24bitOperands(MCInst &Inst, unsigned N) const { 1187 assert(N == 1 && "Invalid number of operands!"); 1188 addExpr(Inst, getImm()); 1189 } 1190 1191 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1192 assert(N == 1 && "Invalid number of operands!"); 1193 // The constant encodes as the immediate, except for 32, which encodes as 1194 // zero. 1195 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1196 unsigned Imm = CE->getValue(); 1197 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1198 } 1199 1200 void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const { 1201 assert(N == 1 && "Invalid number of operands!"); 1202 addExpr(Inst, getImm()); 1203 } 1204 1205 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1206 assert(N == 1 && "Invalid number of operands!"); 1207 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1208 // the instruction as well. 1209 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1210 int Val = CE->getValue(); 1211 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 1212 } 1213 1214 void addARMSOImmOperands(MCInst &Inst, unsigned N) const { 1215 assert(N == 1 && "Invalid number of operands!"); 1216 addExpr(Inst, getImm()); 1217 } 1218 1219 void addT2SOImmOperands(MCInst &Inst, unsigned N) const { 1220 assert(N == 1 && "Invalid number of operands!"); 1221 addExpr(Inst, getImm()); 1222 } 1223 1224 void addSetEndImmOperands(MCInst &Inst, unsigned N) const { 1225 assert(N == 1 && "Invalid number of operands!"); 1226 addExpr(Inst, getImm()); 1227 } 1228 1229 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1230 assert(N == 1 && "Invalid number of operands!"); 1231 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1232 } 1233 1234 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1235 assert(N == 1 && "Invalid number of operands!"); 1236 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1237 } 1238 1239 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1240 assert(N == 2 && "Invalid number of operands!"); 1241 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1242 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1243 } 1244 1245 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1246 assert(N == 3 && "Invalid number of operands!"); 1247 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1248 if (!Memory.OffsetRegNum) { 1249 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1250 // Special case for #-0 1251 if (Val == INT32_MIN) Val = 0; 1252 if (Val < 0) Val = -Val; 1253 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1254 } else { 1255 // For register offset, we encode the shift type and negation flag 1256 // here. 1257 Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1258 Memory.ShiftImm, Memory.ShiftType); 1259 } 1260 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1261 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1262 Inst.addOperand(MCOperand::CreateImm(Val)); 1263 } 1264 1265 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1266 assert(N == 2 && "Invalid number of operands!"); 1267 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1268 assert(CE && "non-constant AM2OffsetImm operand!"); 1269 int32_t Val = CE->getValue(); 1270 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1271 // Special case for #-0 1272 if (Val == INT32_MIN) Val = 0; 1273 if (Val < 0) Val = -Val; 1274 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1275 Inst.addOperand(MCOperand::CreateReg(0)); 1276 Inst.addOperand(MCOperand::CreateImm(Val)); 1277 } 1278 1279 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1280 assert(N == 3 && "Invalid number of operands!"); 1281 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1282 if (!Memory.OffsetRegNum) { 1283 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1284 // Special case for #-0 1285 if (Val == INT32_MIN) Val = 0; 1286 if (Val < 0) Val = -Val; 1287 Val = ARM_AM::getAM3Opc(AddSub, Val); 1288 } else { 1289 // For register offset, we encode the shift type and negation flag 1290 // here. 1291 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1292 } 1293 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1294 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1295 Inst.addOperand(MCOperand::CreateImm(Val)); 1296 } 1297 1298 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1299 assert(N == 2 && "Invalid number of operands!"); 1300 if (Kind == k_PostIndexRegister) { 1301 int32_t Val = 1302 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 1303 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1304 Inst.addOperand(MCOperand::CreateImm(Val)); 1305 return; 1306 } 1307 1308 // Constant offset. 1309 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1310 int32_t Val = CE->getValue(); 1311 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1312 // Special case for #-0 1313 if (Val == INT32_MIN) Val = 0; 1314 if (Val < 0) Val = -Val; 1315 Val = ARM_AM::getAM3Opc(AddSub, Val); 1316 Inst.addOperand(MCOperand::CreateReg(0)); 1317 Inst.addOperand(MCOperand::CreateImm(Val)); 1318 } 1319 1320 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1321 assert(N == 2 && "Invalid number of operands!"); 1322 // The lower two bits are always zero and as such are not encoded. 1323 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1324 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1325 // Special case for #-0 1326 if (Val == INT32_MIN) Val = 0; 1327 if (Val < 0) Val = -Val; 1328 Val = ARM_AM::getAM5Opc(AddSub, Val); 1329 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1330 Inst.addOperand(MCOperand::CreateImm(Val)); 1331 } 1332 1333 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1334 assert(N == 2 && "Invalid number of operands!"); 1335 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1336 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1337 Inst.addOperand(MCOperand::CreateImm(Val)); 1338 } 1339 1340 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1341 assert(N == 2 && "Invalid number of operands!"); 1342 // The lower two bits are always zero and as such are not encoded. 1343 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1344 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1345 Inst.addOperand(MCOperand::CreateImm(Val)); 1346 } 1347 1348 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1349 assert(N == 2 && "Invalid number of operands!"); 1350 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1351 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1352 Inst.addOperand(MCOperand::CreateImm(Val)); 1353 } 1354 1355 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1356 addMemImm8OffsetOperands(Inst, N); 1357 } 1358 1359 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1360 addMemImm8OffsetOperands(Inst, N); 1361 } 1362 1363 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1364 assert(N == 2 && "Invalid number of operands!"); 1365 // If this is an immediate, it's a label reference. 1366 if (Kind == k_Immediate) { 1367 addExpr(Inst, getImm()); 1368 Inst.addOperand(MCOperand::CreateImm(0)); 1369 return; 1370 } 1371 1372 // Otherwise, it's a normal memory reg+offset. 1373 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1374 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1375 Inst.addOperand(MCOperand::CreateImm(Val)); 1376 } 1377 1378 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1379 assert(N == 2 && "Invalid number of operands!"); 1380 // If this is an immediate, it's a label reference. 1381 if (Kind == k_Immediate) { 1382 addExpr(Inst, getImm()); 1383 Inst.addOperand(MCOperand::CreateImm(0)); 1384 return; 1385 } 1386 1387 // Otherwise, it's a normal memory reg+offset. 1388 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1389 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1390 Inst.addOperand(MCOperand::CreateImm(Val)); 1391 } 1392 1393 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1394 assert(N == 2 && "Invalid number of operands!"); 1395 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1396 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1397 } 1398 1399 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1400 assert(N == 2 && "Invalid number of operands!"); 1401 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1402 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1403 } 1404 1405 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1406 assert(N == 3 && "Invalid number of operands!"); 1407 unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1408 Memory.ShiftImm, Memory.ShiftType); 1409 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1410 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1411 Inst.addOperand(MCOperand::CreateImm(Val)); 1412 } 1413 1414 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1415 assert(N == 3 && "Invalid number of operands!"); 1416 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1417 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1418 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1419 } 1420 1421 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1422 assert(N == 2 && "Invalid number of operands!"); 1423 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1424 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1425 } 1426 1427 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1428 assert(N == 2 && "Invalid number of operands!"); 1429 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1430 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1431 Inst.addOperand(MCOperand::CreateImm(Val)); 1432 } 1433 1434 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1435 assert(N == 2 && "Invalid number of operands!"); 1436 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1437 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1438 Inst.addOperand(MCOperand::CreateImm(Val)); 1439 } 1440 1441 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1442 assert(N == 2 && "Invalid number of operands!"); 1443 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1444 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1445 Inst.addOperand(MCOperand::CreateImm(Val)); 1446 } 1447 1448 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1449 assert(N == 2 && "Invalid number of operands!"); 1450 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1451 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1452 Inst.addOperand(MCOperand::CreateImm(Val)); 1453 } 1454 1455 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1456 assert(N == 1 && "Invalid number of operands!"); 1457 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1458 assert(CE && "non-constant post-idx-imm8 operand!"); 1459 int Imm = CE->getValue(); 1460 bool isAdd = Imm >= 0; 1461 if (Imm == INT32_MIN) Imm = 0; 1462 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1463 Inst.addOperand(MCOperand::CreateImm(Imm)); 1464 } 1465 1466 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1467 assert(N == 1 && "Invalid number of operands!"); 1468 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1469 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1470 int Imm = CE->getValue(); 1471 bool isAdd = Imm >= 0; 1472 if (Imm == INT32_MIN) Imm = 0; 1473 // Immediate is scaled by 4. 1474 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1475 Inst.addOperand(MCOperand::CreateImm(Imm)); 1476 } 1477 1478 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1479 assert(N == 2 && "Invalid number of operands!"); 1480 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1481 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1482 } 1483 1484 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1485 assert(N == 2 && "Invalid number of operands!"); 1486 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1487 // The sign, shift type, and shift amount are encoded in a single operand 1488 // using the AM2 encoding helpers. 1489 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1490 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1491 PostIdxReg.ShiftTy); 1492 Inst.addOperand(MCOperand::CreateImm(Imm)); 1493 } 1494 1495 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1496 assert(N == 1 && "Invalid number of operands!"); 1497 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1498 } 1499 1500 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1501 assert(N == 1 && "Invalid number of operands!"); 1502 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1503 } 1504 1505 void addVecListOneDOperands(MCInst &Inst, unsigned N) const { 1506 assert(N == 1 && "Invalid number of operands!"); 1507 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1508 } 1509 1510 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1511 assert(N == 1 && "Invalid number of operands!"); 1512 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1513 } 1514 1515 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1516 assert(N == 1 && "Invalid number of operands!"); 1517 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1518 } 1519 1520 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1521 assert(N == 1 && "Invalid number of operands!"); 1522 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1523 } 1524 1525 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1526 assert(N == 1 && "Invalid number of operands!"); 1527 // The immediate encodes the type of constant as well as the value. 1528 // Mask in that this is an i8 splat. 1529 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1530 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1531 } 1532 1533 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1534 assert(N == 1 && "Invalid number of operands!"); 1535 // The immediate encodes the type of constant as well as the value. 1536 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1537 unsigned Value = CE->getValue(); 1538 if (Value >= 256) 1539 Value = (Value >> 8) | 0xa00; 1540 else 1541 Value |= 0x800; 1542 Inst.addOperand(MCOperand::CreateImm(Value)); 1543 } 1544 1545 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1546 assert(N == 1 && "Invalid number of operands!"); 1547 // The immediate encodes the type of constant as well as the value. 
1548 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1549 unsigned Value = CE->getValue(); 1550 if (Value >= 256 && Value <= 0xff00) 1551 Value = (Value >> 8) | 0x200; 1552 else if (Value > 0xffff && Value <= 0xff0000) 1553 Value = (Value >> 16) | 0x400; 1554 else if (Value > 0xffffff) 1555 Value = (Value >> 24) | 0x600; 1556 Inst.addOperand(MCOperand::CreateImm(Value)); 1557 } 1558 1559 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1560 assert(N == 1 && "Invalid number of operands!"); 1561 // The immediate encodes the type of constant as well as the value. 1562 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1563 unsigned Value = CE->getValue(); 1564 if (Value >= 256 && Value <= 0xffff) 1565 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 1566 else if (Value > 0xffff && Value <= 0xffffff) 1567 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1568 else if (Value > 0xffffff) 1569 Value = (Value >> 24) | 0x600; 1570 Inst.addOperand(MCOperand::CreateImm(Value)); 1571 } 1572 1573 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 1574 assert(N == 1 && "Invalid number of operands!"); 1575 // The immediate encodes the type of constant as well as the value. 1576 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1577 uint64_t Value = CE->getValue(); 1578 unsigned Imm = 0; 1579 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 1580 Imm |= (Value & 1) << i; 1581 } 1582 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 1583 } 1584 1585 virtual void print(raw_ostream &OS) const; 1586 1587 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1588 ARMOperand *Op = new ARMOperand(k_ITCondMask); 1589 Op->ITMask.Mask = Mask; 1590 Op->StartLoc = S; 1591 Op->EndLoc = S; 1592 return Op; 1593 } 1594 1595 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1596 ARMOperand *Op = new ARMOperand(k_CondCode); 1597 Op->CC.Val = CC; 1598 Op->StartLoc = S; 1599 Op->EndLoc = S; 1600 return Op; 1601 } 1602 1603 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1604 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1605 Op->Cop.Val = CopVal; 1606 Op->StartLoc = S; 1607 Op->EndLoc = S; 1608 return Op; 1609 } 1610 1611 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1612 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1613 Op->Cop.Val = CopVal; 1614 Op->StartLoc = S; 1615 Op->EndLoc = S; 1616 return Op; 1617 } 1618 1619 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1620 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1621 Op->Cop.Val = Val; 1622 Op->StartLoc = S; 1623 Op->EndLoc = E; 1624 return Op; 1625 } 1626 1627 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 1628 ARMOperand *Op = new ARMOperand(k_CCOut); 1629 Op->Reg.RegNum = RegNum; 1630 Op->StartLoc = S; 1631 Op->EndLoc = S; 1632 return Op; 1633 } 1634 1635 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 1636 ARMOperand *Op = new ARMOperand(k_Token); 1637 Op->Tok.Data = Str.data(); 1638 Op->Tok.Length = Str.size(); 1639 Op->StartLoc = S; 1640 Op->EndLoc = S; 1641 return Op; 1642 } 1643 1644 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 1645 ARMOperand *Op = new ARMOperand(k_Register); 1646 Op->Reg.RegNum = RegNum; 1647 Op->StartLoc = S; 1648 Op->EndLoc = E; 1649 return Op; 1650 } 1651 1652 static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, 1653 unsigned SrcReg, 1654 unsigned ShiftReg, 1655 unsigned ShiftImm, 1656 SMLoc S, SMLoc E) { 1657 ARMOperand *Op = new 
ARMOperand(k_ShiftedRegister); 1658 Op->RegShiftedReg.ShiftTy = ShTy; 1659 Op->RegShiftedReg.SrcReg = SrcReg; 1660 Op->RegShiftedReg.ShiftReg = ShiftReg; 1661 Op->RegShiftedReg.ShiftImm = ShiftImm; 1662 Op->StartLoc = S; 1663 Op->EndLoc = E; 1664 return Op; 1665 } 1666 1667 static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, 1668 unsigned SrcReg, 1669 unsigned ShiftImm, 1670 SMLoc S, SMLoc E) { 1671 ARMOperand *Op = new ARMOperand(k_ShiftedImmediate); 1672 Op->RegShiftedImm.ShiftTy = ShTy; 1673 Op->RegShiftedImm.SrcReg = SrcReg; 1674 Op->RegShiftedImm.ShiftImm = ShiftImm; 1675 Op->StartLoc = S; 1676 Op->EndLoc = E; 1677 return Op; 1678 } 1679 1680 static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm, 1681 SMLoc S, SMLoc E) { 1682 ARMOperand *Op = new ARMOperand(k_ShifterImmediate); 1683 Op->ShifterImm.isASR = isASR; 1684 Op->ShifterImm.Imm = Imm; 1685 Op->StartLoc = S; 1686 Op->EndLoc = E; 1687 return Op; 1688 } 1689 1690 static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) { 1691 ARMOperand *Op = new ARMOperand(k_RotateImmediate); 1692 Op->RotImm.Imm = Imm; 1693 Op->StartLoc = S; 1694 Op->EndLoc = E; 1695 return Op; 1696 } 1697 1698 static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width, 1699 SMLoc S, SMLoc E) { 1700 ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor); 1701 Op->Bitfield.LSB = LSB; 1702 Op->Bitfield.Width = Width; 1703 Op->StartLoc = S; 1704 Op->EndLoc = E; 1705 return Op; 1706 } 1707 1708 static ARMOperand * 1709 CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs, 1710 SMLoc StartLoc, SMLoc EndLoc) { 1711 KindTy Kind = k_RegisterList; 1712 1713 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first)) 1714 Kind = k_DPRRegisterList; 1715 else if (ARMMCRegisterClasses[ARM::SPRRegClassID]. 
1716 contains(Regs.front().first)) 1717 Kind = k_SPRRegisterList; 1718 1719 ARMOperand *Op = new ARMOperand(Kind); 1720 for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator 1721 I = Regs.begin(), E = Regs.end(); I != E; ++I) 1722 Op->Registers.push_back(I->first); 1723 array_pod_sort(Op->Registers.begin(), Op->Registers.end()); 1724 Op->StartLoc = StartLoc; 1725 Op->EndLoc = EndLoc; 1726 return Op; 1727 } 1728 1729 static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count, 1730 SMLoc S, SMLoc E) { 1731 ARMOperand *Op = new ARMOperand(k_VectorList); 1732 Op->VectorList.RegNum = RegNum; 1733 Op->VectorList.Count = Count; 1734 Op->StartLoc = S; 1735 Op->EndLoc = E; 1736 return Op; 1737 } 1738 1739 static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, 1740 MCContext &Ctx) { 1741 ARMOperand *Op = new ARMOperand(k_VectorIndex); 1742 Op->VectorIndex.Val = Idx; 1743 Op->StartLoc = S; 1744 Op->EndLoc = E; 1745 return Op; 1746 } 1747 1748 static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { 1749 ARMOperand *Op = new ARMOperand(k_Immediate); 1750 Op->Imm.Val = Val; 1751 Op->StartLoc = S; 1752 Op->EndLoc = E; 1753 return Op; 1754 } 1755 1756 static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) { 1757 ARMOperand *Op = new ARMOperand(k_FPImmediate); 1758 Op->FPImm.Val = Val; 1759 Op->StartLoc = S; 1760 Op->EndLoc = S; 1761 return Op; 1762 } 1763 1764 static ARMOperand *CreateMem(unsigned BaseRegNum, 1765 const MCConstantExpr *OffsetImm, 1766 unsigned OffsetRegNum, 1767 ARM_AM::ShiftOpc ShiftType, 1768 unsigned ShiftImm, 1769 unsigned Alignment, 1770 bool isNegative, 1771 SMLoc S, SMLoc E) { 1772 ARMOperand *Op = new ARMOperand(k_Memory); 1773 Op->Memory.BaseRegNum = BaseRegNum; 1774 Op->Memory.OffsetImm = OffsetImm; 1775 Op->Memory.OffsetRegNum = OffsetRegNum; 1776 Op->Memory.ShiftType = ShiftType; 1777 Op->Memory.ShiftImm = ShiftImm; 1778 Op->Memory.Alignment = Alignment; 1779 Op->Memory.isNegative = isNegative; 1780 Op->StartLoc = S; 1781 Op->EndLoc = E; 1782 return Op; 1783 } 1784 1785 static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd, 1786 ARM_AM::ShiftOpc ShiftTy, 1787 unsigned ShiftImm, 1788 SMLoc S, SMLoc E) { 1789 ARMOperand *Op = new ARMOperand(k_PostIndexRegister); 1790 Op->PostIdxReg.RegNum = RegNum; 1791 Op->PostIdxReg.isAdd = isAdd; 1792 Op->PostIdxReg.ShiftTy = ShiftTy; 1793 Op->PostIdxReg.ShiftImm = ShiftImm; 1794 Op->StartLoc = S; 1795 Op->EndLoc = E; 1796 return Op; 1797 } 1798 1799 static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) { 1800 ARMOperand *Op = new ARMOperand(k_MemBarrierOpt); 1801 Op->MBOpt.Val = Opt; 1802 Op->StartLoc = S; 1803 Op->EndLoc = S; 1804 return Op; 1805 } 1806 1807 static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) { 1808 ARMOperand *Op = new ARMOperand(k_ProcIFlags); 1809 Op->IFlags.Val = IFlags; 1810 Op->StartLoc = S; 1811 Op->EndLoc = S; 1812 return Op; 1813 } 1814 1815 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) { 1816 ARMOperand *Op = new ARMOperand(k_MSRMask); 1817 Op->MMask.Val = MMask; 1818 Op->StartLoc = S; 1819 Op->EndLoc = S; 1820 return Op; 1821 } 1822}; 1823 1824} // end anonymous namespace. 
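// Illustrative sketch (not part of the upstream parser): how a parse routine
// is expected to use the ARMOperand factory methods above. Each operand is
// heap-allocated, appended to the caller-owned operand list, and carries its
// source range for diagnostics. The function name, register, condition code,
// and immediate below are arbitrary placeholders, not values the parser uses.
static inline void exampleBuildOperandList(
    SmallVectorImpl<MCParsedAsmOperand*> &Operands, MCContext &Ctx) {
  SMLoc S, E; // Normally taken from Parser.getTok().getLoc().
  // A bare token operand, e.g. the "!" writeback marker or a mnemonic piece.
  Operands.push_back(ARMOperand::CreateToken("example", S));
  // A predicate operand; AL is the "always" condition.
  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::AL, S));
  // A plain register operand.
  Operands.push_back(ARMOperand::CreateReg(ARM::R0, S, E));
  // An immediate operand wraps an MCExpr, here a constant 42.
  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(42, Ctx), S, E));
}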
1825 1826void ARMOperand::print(raw_ostream &OS) const { 1827 switch (Kind) { 1828 case k_FPImmediate: 1829 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1830 << ") >"; 1831 break; 1832 case k_CondCode: 1833 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1834 break; 1835 case k_CCOut: 1836 OS << "<ccout " << getReg() << ">"; 1837 break; 1838 case k_ITCondMask: { 1839 static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)", 1840 "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)", 1841 "(tee)", "(eee)" }; 1842 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1843 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1844 break; 1845 } 1846 case k_CoprocNum: 1847 OS << "<coprocessor number: " << getCoproc() << ">"; 1848 break; 1849 case k_CoprocReg: 1850 OS << "<coprocessor register: " << getCoproc() << ">"; 1851 break; 1852 case k_CoprocOption: 1853 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1854 break; 1855 case k_MSRMask: 1856 OS << "<mask: " << getMSRMask() << ">"; 1857 break; 1858 case k_Immediate: 1859 getImm()->print(OS); 1860 break; 1861 case k_MemBarrierOpt: 1862 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1863 break; 1864 case k_Memory: 1865 OS << "<memory " 1866 << " base:" << Memory.BaseRegNum; 1867 OS << ">"; 1868 break; 1869 case k_PostIndexRegister: 1870 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-") 1871 << PostIdxReg.RegNum; 1872 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1873 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1874 << PostIdxReg.ShiftImm; 1875 OS << ">"; 1876 break; 1877 case k_ProcIFlags: { 1878 OS << "<ARM_PROC::"; 1879 unsigned IFlags = getProcIFlags(); 1880 for (int i=2; i >= 0; --i) 1881 if (IFlags & (1 << i)) 1882 OS << ARM_PROC::IFlagsToString(1 << i); 1883 OS << ">"; 1884 break; 1885 } 1886 case k_Register: 1887 OS << "<register " << getReg() << ">"; 1888 break; 1889 case k_ShifterImmediate: 1890 OS << "<shift " << (ShifterImm.isASR ? 
"asr" : "lsl") 1891 << " #" << ShifterImm.Imm << ">"; 1892 break; 1893 case k_ShiftedRegister: 1894 OS << "<so_reg_reg " 1895 << RegShiftedReg.SrcReg 1896 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1897 << ", " << RegShiftedReg.ShiftReg << ", " 1898 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1899 << ">"; 1900 break; 1901 case k_ShiftedImmediate: 1902 OS << "<so_reg_imm " 1903 << RegShiftedImm.SrcReg 1904 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1905 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1906 << ">"; 1907 break; 1908 case k_RotateImmediate: 1909 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1910 break; 1911 case k_BitfieldDescriptor: 1912 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1913 << ", width: " << Bitfield.Width << ">"; 1914 break; 1915 case k_RegisterList: 1916 case k_DPRRegisterList: 1917 case k_SPRRegisterList: { 1918 OS << "<register_list "; 1919 1920 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1921 for (SmallVectorImpl<unsigned>::const_iterator 1922 I = RegList.begin(), E = RegList.end(); I != E; ) { 1923 OS << *I; 1924 if (++I < E) OS << ", "; 1925 } 1926 1927 OS << ">"; 1928 break; 1929 } 1930 case k_VectorList: 1931 OS << "<vector_list " << VectorList.Count << " * " 1932 << VectorList.RegNum << ">"; 1933 break; 1934 case k_Token: 1935 OS << "'" << getToken() << "'"; 1936 break; 1937 case k_VectorIndex: 1938 OS << "<vectorindex " << getVectorIndex() << ">"; 1939 break; 1940 } 1941} 1942 1943/// @name Auto-generated Match Functions 1944/// { 1945 1946static unsigned MatchRegisterName(StringRef Name); 1947 1948/// } 1949 1950bool ARMAsmParser::ParseRegister(unsigned &RegNo, 1951 SMLoc &StartLoc, SMLoc &EndLoc) { 1952 RegNo = tryParseRegister(); 1953 1954 return (RegNo == (unsigned)-1); 1955} 1956 1957/// Try to parse a register name. The token must be an Identifier when called, 1958/// and if it is a register name the token is eaten and the register number is 1959/// returned. Otherwise return -1. 1960/// 1961int ARMAsmParser::tryParseRegister() { 1962 const AsmToken &Tok = Parser.getTok(); 1963 if (Tok.isNot(AsmToken::Identifier)) return -1; 1964 1965 // FIXME: Validate register for the current architecture; we have to do 1966 // validation later, so maybe there is no need for this here. 1967 std::string upperCase = Tok.getString().str(); 1968 std::string lowerCase = LowercaseString(upperCase); 1969 unsigned RegNum = MatchRegisterName(lowerCase); 1970 if (!RegNum) { 1971 RegNum = StringSwitch<unsigned>(lowerCase) 1972 .Case("r13", ARM::SP) 1973 .Case("r14", ARM::LR) 1974 .Case("r15", ARM::PC) 1975 .Case("ip", ARM::R12) 1976 .Default(0); 1977 } 1978 if (!RegNum) return -1; 1979 1980 Parser.Lex(); // Eat identifier token. 1981 1982 return RegNum; 1983} 1984 1985// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 1986// If a recoverable error occurs, return 1. If an irrecoverable error 1987// occurs, return -1. An irrecoverable error is one where tokens have been 1988// consumed in the process of trying to parse the shifter (i.e., when it is 1989// indeed a shifter operand, but malformed). 
1990int ARMAsmParser::tryParseShiftRegister( 1991 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 1992 SMLoc S = Parser.getTok().getLoc(); 1993 const AsmToken &Tok = Parser.getTok(); 1994 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 1995 1996 std::string upperCase = Tok.getString().str(); 1997 std::string lowerCase = LowercaseString(upperCase); 1998 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 1999 .Case("lsl", ARM_AM::lsl) 2000 .Case("lsr", ARM_AM::lsr) 2001 .Case("asr", ARM_AM::asr) 2002 .Case("ror", ARM_AM::ror) 2003 .Case("rrx", ARM_AM::rrx) 2004 .Default(ARM_AM::no_shift); 2005 2006 if (ShiftTy == ARM_AM::no_shift) 2007 return 1; 2008 2009 Parser.Lex(); // Eat the operator. 2010 2011 // The source register for the shift has already been added to the 2012 // operand list, so we need to pop it off and combine it into the shifted 2013 // register operand instead. 2014 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val()); 2015 if (!PrevOp->isReg()) 2016 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 2017 int SrcReg = PrevOp->getReg(); 2018 int64_t Imm = 0; 2019 int ShiftReg = 0; 2020 if (ShiftTy == ARM_AM::rrx) { 2021 // RRX Doesn't have an explicit shift amount. The encoder expects 2022 // the shift register to be the same as the source register. Seems odd, 2023 // but OK. 2024 ShiftReg = SrcReg; 2025 } else { 2026 // Figure out if this is shifted by a constant or a register (for non-RRX). 2027 if (Parser.getTok().is(AsmToken::Hash)) { 2028 Parser.Lex(); // Eat hash. 2029 SMLoc ImmLoc = Parser.getTok().getLoc(); 2030 const MCExpr *ShiftExpr = 0; 2031 if (getParser().ParseExpression(ShiftExpr)) { 2032 Error(ImmLoc, "invalid immediate shift value"); 2033 return -1; 2034 } 2035 // The expression must be evaluatable as an immediate. 2036 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 2037 if (!CE) { 2038 Error(ImmLoc, "invalid immediate shift value"); 2039 return -1; 2040 } 2041 // Range check the immediate. 2042 // lsl, ror: 0 <= imm <= 31 2043 // lsr, asr: 0 <= imm <= 32 2044 Imm = CE->getValue(); 2045 if (Imm < 0 || 2046 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 2047 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 2048 Error(ImmLoc, "immediate shift value out of range"); 2049 return -1; 2050 } 2051 } else if (Parser.getTok().is(AsmToken::Identifier)) { 2052 ShiftReg = tryParseRegister(); 2053 SMLoc L = Parser.getTok().getLoc(); 2054 if (ShiftReg == -1) { 2055 Error (L, "expected immediate or register in shift operand"); 2056 return -1; 2057 } 2058 } else { 2059 Error (Parser.getTok().getLoc(), 2060 "expected immediate or register in shift operand"); 2061 return -1; 2062 } 2063 } 2064 2065 if (ShiftReg && ShiftTy != ARM_AM::rrx) 2066 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 2067 ShiftReg, Imm, 2068 S, Parser.getTok().getLoc())); 2069 else 2070 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 2071 S, Parser.getTok().getLoc())); 2072 2073 return 0; 2074} 2075 2076 2077/// Try to parse a register name. The token must be an Identifier when called. 2078/// If it's a register, an AsmOperand is created. Another AsmOperand is created 2079/// if there is a "writeback". 'true' if it's not a register. 2080/// 2081/// TODO this is likely to change to allow different register types and or to 2082/// parse for a specific register type. 
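/// For example, "r2" yields a single register operand; "r2!" (as in
/// "ldmia r2!, {r0, r1}") additionally yields a "!" token operand for the
/// writeback marker; and "d2[1]" yields the register plus a vector index
/// operand.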
2083bool ARMAsmParser:: 2084tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2085 SMLoc S = Parser.getTok().getLoc(); 2086 int RegNo = tryParseRegister(); 2087 if (RegNo == -1) 2088 return true; 2089 2090 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2091 2092 const AsmToken &ExclaimTok = Parser.getTok(); 2093 if (ExclaimTok.is(AsmToken::Exclaim)) { 2094 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2095 ExclaimTok.getLoc())); 2096 Parser.Lex(); // Eat exclaim token 2097 return false; 2098 } 2099 2100 // Also check for an index operand. This is only legal for vector registers, 2101 // but that'll get caught OK in operand matching, so we don't need to 2102 // explicitly filter everything else out here. 2103 if (Parser.getTok().is(AsmToken::LBrac)) { 2104 SMLoc SIdx = Parser.getTok().getLoc(); 2105 Parser.Lex(); // Eat left bracket token. 2106 2107 const MCExpr *ImmVal; 2108 if (getParser().ParseExpression(ImmVal)) 2109 return MatchOperand_ParseFail; 2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2111 if (!MCE) { 2112 TokError("immediate value expected for vector index"); 2113 return MatchOperand_ParseFail; 2114 } 2115 2116 SMLoc E = Parser.getTok().getLoc(); 2117 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2118 Error(E, "']' expected"); 2119 return MatchOperand_ParseFail; 2120 } 2121 2122 Parser.Lex(); // Eat right bracket token. 2123 2124 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2125 SIdx, E, 2126 getContext())); 2127 } 2128 2129 return false; 2130} 2131 2132/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2133/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2134/// "c5", ... 2135static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2136 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2137 // but efficient. 2138 switch (Name.size()) { 2139 default: break; 2140 case 2: 2141 if (Name[0] != CoprocOp) 2142 return -1; 2143 switch (Name[1]) { 2144 default: return -1; 2145 case '0': return 0; 2146 case '1': return 1; 2147 case '2': return 2; 2148 case '3': return 3; 2149 case '4': return 4; 2150 case '5': return 5; 2151 case '6': return 6; 2152 case '7': return 7; 2153 case '8': return 8; 2154 case '9': return 9; 2155 } 2156 break; 2157 case 3: 2158 if (Name[0] != CoprocOp || Name[1] != '1') 2159 return -1; 2160 switch (Name[2]) { 2161 default: return -1; 2162 case '0': return 10; 2163 case '1': return 11; 2164 case '2': return 12; 2165 case '3': return 13; 2166 case '4': return 14; 2167 case '5': return 15; 2168 } 2169 break; 2170 } 2171 2172 return -1; 2173} 2174 2175/// parseITCondCode - Try to parse a condition code for an IT instruction. 
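/// For example, the "eq" in "it eq" or "itte eq".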
2176ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2177parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2178 SMLoc S = Parser.getTok().getLoc(); 2179 const AsmToken &Tok = Parser.getTok(); 2180 if (!Tok.is(AsmToken::Identifier)) 2181 return MatchOperand_NoMatch; 2182 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2183 .Case("eq", ARMCC::EQ) 2184 .Case("ne", ARMCC::NE) 2185 .Case("hs", ARMCC::HS) 2186 .Case("cs", ARMCC::HS) 2187 .Case("lo", ARMCC::LO) 2188 .Case("cc", ARMCC::LO) 2189 .Case("mi", ARMCC::MI) 2190 .Case("pl", ARMCC::PL) 2191 .Case("vs", ARMCC::VS) 2192 .Case("vc", ARMCC::VC) 2193 .Case("hi", ARMCC::HI) 2194 .Case("ls", ARMCC::LS) 2195 .Case("ge", ARMCC::GE) 2196 .Case("lt", ARMCC::LT) 2197 .Case("gt", ARMCC::GT) 2198 .Case("le", ARMCC::LE) 2199 .Case("al", ARMCC::AL) 2200 .Default(~0U); 2201 if (CC == ~0U) 2202 return MatchOperand_NoMatch; 2203 Parser.Lex(); // Eat the token. 2204 2205 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2206 2207 return MatchOperand_Success; 2208} 2209 2210/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The 2211/// token must be an Identifier when called, and if it is a coprocessor 2212/// number, the token is eaten and the operand is added to the operand list. 2213ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2214parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2215 SMLoc S = Parser.getTok().getLoc(); 2216 const AsmToken &Tok = Parser.getTok(); 2217 if (Tok.isNot(AsmToken::Identifier)) 2218 return MatchOperand_NoMatch; 2219 2220 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2221 if (Num == -1) 2222 return MatchOperand_NoMatch; 2223 2224 Parser.Lex(); // Eat identifier token. 2225 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2226 return MatchOperand_Success; 2227} 2228 2229/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2230/// token must be an Identifier when called, and if it is a coprocessor 2231/// number, the token is eaten and the operand is added to the operand list. 2232ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2233parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2234 SMLoc S = Parser.getTok().getLoc(); 2235 const AsmToken &Tok = Parser.getTok(); 2236 if (Tok.isNot(AsmToken::Identifier)) 2237 return MatchOperand_NoMatch; 2238 2239 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2240 if (Reg == -1) 2241 return MatchOperand_NoMatch; 2242 2243 Parser.Lex(); // Eat identifier token. 2244 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2245 return MatchOperand_Success; 2246} 2247 2248/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2249/// coproc_option : '{' imm0_255 '}' 2250ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2251parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2252 SMLoc S = Parser.getTok().getLoc(); 2253 2254 // If this isn't a '{', this isn't a coprocessor immediate operand. 
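  // For example, the "{16}" in an unindexed LDC/STC form such as
  // "ldc p14, c5, [r2], {16}"; any constant in [0,255] is accepted.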
2255 if (Parser.getTok().isNot(AsmToken::LCurly)) 2256 return MatchOperand_NoMatch; 2257 Parser.Lex(); // Eat the '{' 2258 2259 const MCExpr *Expr; 2260 SMLoc Loc = Parser.getTok().getLoc(); 2261 if (getParser().ParseExpression(Expr)) { 2262 Error(Loc, "illegal expression"); 2263 return MatchOperand_ParseFail; 2264 } 2265 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2266 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2267 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2268 return MatchOperand_ParseFail; 2269 } 2270 int Val = CE->getValue(); 2271 2272 // Check for and consume the closing '}' 2273 if (Parser.getTok().isNot(AsmToken::RCurly)) 2274 return MatchOperand_ParseFail; 2275 SMLoc E = Parser.getTok().getLoc(); 2276 Parser.Lex(); // Eat the '}' 2277 2278 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2279 return MatchOperand_Success; 2280} 2281 2282// For register list parsing, we need to map from raw GPR register numbering 2283// to the enumeration values. The enumeration values aren't sorted by 2284// register number due to our using "sp", "lr" and "pc" as canonical names. 2285static unsigned getNextRegister(unsigned Reg) { 2286 // If this is a GPR, we need to do it manually, otherwise we can rely 2287 // on the sort ordering of the enumeration since the other reg-classes 2288 // are sane. 2289 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2290 return Reg + 1; 2291 switch(Reg) { 2292 default: assert(0 && "Invalid GPR number!"); 2293 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2294 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2295 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2296 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2297 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2298 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2299 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2300 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2301 } 2302} 2303 2304/// Parse a register list. 2305bool ARMAsmParser:: 2306parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2307 assert(Parser.getTok().is(AsmToken::LCurly) && 2308 "Token is not a Left Curly Brace"); 2309 SMLoc S = Parser.getTok().getLoc(); 2310 Parser.Lex(); // Eat '{' token. 2311 SMLoc RegLoc = Parser.getTok().getLoc(); 2312 2313 // Check the first register in the list to see what register class 2314 // this is a list of. 2315 int Reg = tryParseRegister(); 2316 if (Reg == -1) 2317 return Error(RegLoc, "register expected"); 2318 2319 MCRegisterClass *RC; 2320 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2321 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2322 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2323 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2324 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2325 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2326 else 2327 return Error(RegLoc, "invalid register in register list"); 2328 2329 // The reglist instructions have at most 16 registers, so reserve 2330 // space for that many. 2331 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2332 // Store the first register. 2333 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2334 2335 // This starts immediately after the first register token in the list, 2336 // so we can see either a comma or a minus (range separator) as a legal 2337 // next token. 
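  // For example, "{r0, r2-r5, lr}" adds r0, expands the r2-r5 range one
  // register at a time via getNextRegister(), then adds lr; the checks below
  // enforce a single register class and ascending register order.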
2338 while (Parser.getTok().is(AsmToken::Comma) || 2339 Parser.getTok().is(AsmToken::Minus)) { 2340 if (Parser.getTok().is(AsmToken::Minus)) { 2341 Parser.Lex(); // Eat the comma. 2342 SMLoc EndLoc = Parser.getTok().getLoc(); 2343 int EndReg = tryParseRegister(); 2344 if (EndReg == -1) 2345 return Error(EndLoc, "register expected"); 2346 // If the register is the same as the start reg, there's nothing 2347 // more to do. 2348 if (Reg == EndReg) 2349 continue; 2350 // The register must be in the same register class as the first. 2351 if (!RC->contains(EndReg)) 2352 return Error(EndLoc, "invalid register in register list"); 2353 // Ranges must go from low to high. 2354 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2355 return Error(EndLoc, "bad range in register list"); 2356 2357 // Add all the registers in the range to the register list. 2358 while (Reg != EndReg) { 2359 Reg = getNextRegister(Reg); 2360 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2361 } 2362 continue; 2363 } 2364 Parser.Lex(); // Eat the comma. 2365 RegLoc = Parser.getTok().getLoc(); 2366 int OldReg = Reg; 2367 Reg = tryParseRegister(); 2368 if (Reg == -1) 2369 return Error(RegLoc, "register expected"); 2370 // The register must be in the same register class as the first. 2371 if (!RC->contains(Reg)) 2372 return Error(RegLoc, "invalid register in register list"); 2373 // List must be monotonically increasing. 2374 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2375 return Error(RegLoc, "register list not in ascending order"); 2376 // VFP register lists must also be contiguous. 2377 // It's OK to use the enumeration values directly here rather, as the 2378 // VFP register classes have the enum sorted properly. 2379 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2380 Reg != OldReg + 1) 2381 return Error(RegLoc, "non-contiguous register range"); 2382 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2383 } 2384 2385 SMLoc E = Parser.getTok().getLoc(); 2386 if (Parser.getTok().isNot(AsmToken::RCurly)) 2387 return Error(E, "'}' expected"); 2388 Parser.Lex(); // Eat '}' token. 2389 2390 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2391 return false; 2392} 2393 2394// parse a vector register list 2395ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2396parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2397 if(Parser.getTok().isNot(AsmToken::LCurly)) 2398 return MatchOperand_NoMatch; 2399 2400 SMLoc S = Parser.getTok().getLoc(); 2401 Parser.Lex(); // Eat '{' token. 2402 SMLoc RegLoc = Parser.getTok().getLoc(); 2403 2404 int Reg = tryParseRegister(); 2405 if (Reg == -1) { 2406 Error(RegLoc, "register expected"); 2407 return MatchOperand_ParseFail; 2408 } 2409 2410 unsigned FirstReg = Reg; 2411 unsigned Count = 1; 2412 while (Parser.getTok().is(AsmToken::Comma)) { 2413 Parser.Lex(); // Eat the comma. 2414 RegLoc = Parser.getTok().getLoc(); 2415 int OldReg = Reg; 2416 Reg = tryParseRegister(); 2417 if (Reg == -1) { 2418 Error(RegLoc, "register expected"); 2419 return MatchOperand_ParseFail; 2420 } 2421 // vector register lists must also be contiguous. 2422 // It's OK to use the enumeration values directly here rather, as the 2423 // VFP register classes have the enum sorted properly. 
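    // For example, "{d0, d1, d2}" gives FirstReg = d0 and Count = 3, while
    // "{d0, d2}" is rejected just below as a non-contiguous range.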
2424 if (Reg != OldReg + 1) { 2425 Error(RegLoc, "non-contiguous register range"); 2426 return MatchOperand_ParseFail; 2427 } 2428 2429 ++Count; 2430 } 2431 2432 SMLoc E = Parser.getTok().getLoc(); 2433 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2434 Error(E, "'}' expected"); 2435 return MatchOperand_ParseFail; 2436 } 2437 Parser.Lex(); // Eat '}' token. 2438 2439 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2440 return MatchOperand_Success; 2441} 2442 2443/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2444ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2445parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2446 SMLoc S = Parser.getTok().getLoc(); 2447 const AsmToken &Tok = Parser.getTok(); 2448 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2449 StringRef OptStr = Tok.getString(); 2450 2451 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2452 .Case("sy", ARM_MB::SY) 2453 .Case("st", ARM_MB::ST) 2454 .Case("sh", ARM_MB::ISH) 2455 .Case("ish", ARM_MB::ISH) 2456 .Case("shst", ARM_MB::ISHST) 2457 .Case("ishst", ARM_MB::ISHST) 2458 .Case("nsh", ARM_MB::NSH) 2459 .Case("un", ARM_MB::NSH) 2460 .Case("nshst", ARM_MB::NSHST) 2461 .Case("unst", ARM_MB::NSHST) 2462 .Case("osh", ARM_MB::OSH) 2463 .Case("oshst", ARM_MB::OSHST) 2464 .Default(~0U); 2465 2466 if (Opt == ~0U) 2467 return MatchOperand_NoMatch; 2468 2469 Parser.Lex(); // Eat identifier token. 2470 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2471 return MatchOperand_Success; 2472} 2473 2474/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2475ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2476parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2477 SMLoc S = Parser.getTok().getLoc(); 2478 const AsmToken &Tok = Parser.getTok(); 2479 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2480 StringRef IFlagsStr = Tok.getString(); 2481 2482 // An iflags string of "none" is interpreted to mean that none of the AIF 2483 // bits are set. Not a terribly useful instruction, but a valid encoding. 2484 unsigned IFlags = 0; 2485 if (IFlagsStr != "none") { 2486 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2487 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2488 .Case("a", ARM_PROC::A) 2489 .Case("i", ARM_PROC::I) 2490 .Case("f", ARM_PROC::F) 2491 .Default(~0U); 2492 2493 // If some specific iflag is already set, it means that some letter is 2494 // present more than once, this is not acceptable. 2495 if (Flag == ~0U || (IFlags & Flag)) 2496 return MatchOperand_NoMatch; 2497 2498 IFlags |= Flag; 2499 } 2500 } 2501 2502 Parser.Lex(); // Eat identifier token. 2503 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2504 return MatchOperand_Success; 2505} 2506 2507/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
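/// For example, the "apsr_nzcvq" in "msr apsr_nzcvq, r0", the "spsr_fc" in
/// "msr spsr_fc, r1", or, on M-class cores, system registers such as
/// "primask" and "basepri".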
2508ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2509parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2510 SMLoc S = Parser.getTok().getLoc(); 2511 const AsmToken &Tok = Parser.getTok(); 2512 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2513 StringRef Mask = Tok.getString(); 2514 2515 if (isMClass()) { 2516 // See ARMv6-M 10.1.1 2517 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2518 .Case("apsr", 0) 2519 .Case("iapsr", 1) 2520 .Case("eapsr", 2) 2521 .Case("xpsr", 3) 2522 .Case("ipsr", 5) 2523 .Case("epsr", 6) 2524 .Case("iepsr", 7) 2525 .Case("msp", 8) 2526 .Case("psp", 9) 2527 .Case("primask", 16) 2528 .Case("basepri", 17) 2529 .Case("basepri_max", 18) 2530 .Case("faultmask", 19) 2531 .Case("control", 20) 2532 .Default(~0U); 2533 2534 if (FlagsVal == ~0U) 2535 return MatchOperand_NoMatch; 2536 2537 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2538 // basepri, basepri_max and faultmask only valid for V7m. 2539 return MatchOperand_NoMatch; 2540 2541 Parser.Lex(); // Eat identifier token. 2542 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2543 return MatchOperand_Success; 2544 } 2545 2546 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2547 size_t Start = 0, Next = Mask.find('_'); 2548 StringRef Flags = ""; 2549 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2550 if (Next != StringRef::npos) 2551 Flags = Mask.slice(Next+1, Mask.size()); 2552 2553 // FlagsVal contains the complete mask: 2554 // 3-0: Mask 2555 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2556 unsigned FlagsVal = 0; 2557 2558 if (SpecReg == "apsr") { 2559 FlagsVal = StringSwitch<unsigned>(Flags) 2560 .Case("nzcvq", 0x8) // same as CPSR_f 2561 .Case("g", 0x4) // same as CPSR_s 2562 .Case("nzcvqg", 0xc) // same as CPSR_fs 2563 .Default(~0U); 2564 2565 if (FlagsVal == ~0U) { 2566 if (!Flags.empty()) 2567 return MatchOperand_NoMatch; 2568 else 2569 FlagsVal = 8; // No flag 2570 } 2571 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2572 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2573 Flags = "fc"; 2574 for (int i = 0, e = Flags.size(); i != e; ++i) { 2575 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2576 .Case("c", 1) 2577 .Case("x", 2) 2578 .Case("s", 4) 2579 .Case("f", 8) 2580 .Default(~0U); 2581 2582 // If some specific flag is already set, it means that some letter is 2583 // present more than once, this is not acceptable. 2584 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2585 return MatchOperand_NoMatch; 2586 FlagsVal |= Flag; 2587 } 2588 } else // No match for special register. 2589 return MatchOperand_NoMatch; 2590 2591 // Special register without flags is NOT equivalent to "fc" flags. 2592 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2593 // two lines would enable gas compatibility at the expense of breaking 2594 // round-tripping. 2595 // 2596 // if (!FlagsVal) 2597 // FlagsVal = 0x9; 2598 2599 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2600 if (SpecReg == "spsr") 2601 FlagsVal |= 16; 2602 2603 Parser.Lex(); // Eat identifier token. 
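  // At this point FlagsVal holds the 5-bit encoding described above, e.g.
  // "apsr_nzcvq" -> 0x8, "cpsr_fc" -> 0x9, "spsr_cxsf" -> 0x1f (bit 4 set
  // for SPSR).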
2604 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2605 return MatchOperand_Success; 2606} 2607 2608ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2609parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2610 int Low, int High) { 2611 const AsmToken &Tok = Parser.getTok(); 2612 if (Tok.isNot(AsmToken::Identifier)) { 2613 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2614 return MatchOperand_ParseFail; 2615 } 2616 StringRef ShiftName = Tok.getString(); 2617 std::string LowerOp = LowercaseString(Op); 2618 std::string UpperOp = UppercaseString(Op); 2619 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2620 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2621 return MatchOperand_ParseFail; 2622 } 2623 Parser.Lex(); // Eat shift type token. 2624 2625 // There must be a '#' and a shift amount. 2626 if (Parser.getTok().isNot(AsmToken::Hash)) { 2627 Error(Parser.getTok().getLoc(), "'#' expected"); 2628 return MatchOperand_ParseFail; 2629 } 2630 Parser.Lex(); // Eat hash token. 2631 2632 const MCExpr *ShiftAmount; 2633 SMLoc Loc = Parser.getTok().getLoc(); 2634 if (getParser().ParseExpression(ShiftAmount)) { 2635 Error(Loc, "illegal expression"); 2636 return MatchOperand_ParseFail; 2637 } 2638 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2639 if (!CE) { 2640 Error(Loc, "constant expression expected"); 2641 return MatchOperand_ParseFail; 2642 } 2643 int Val = CE->getValue(); 2644 if (Val < Low || Val > High) { 2645 Error(Loc, "immediate value out of range"); 2646 return MatchOperand_ParseFail; 2647 } 2648 2649 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2650 2651 return MatchOperand_Success; 2652} 2653 2654ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2655parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2656 const AsmToken &Tok = Parser.getTok(); 2657 SMLoc S = Tok.getLoc(); 2658 if (Tok.isNot(AsmToken::Identifier)) { 2659 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2660 return MatchOperand_ParseFail; 2661 } 2662 int Val = StringSwitch<int>(Tok.getString()) 2663 .Case("be", 1) 2664 .Case("le", 0) 2665 .Default(-1); 2666 Parser.Lex(); // Eat the token. 2667 2668 if (Val == -1) { 2669 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2670 return MatchOperand_ParseFail; 2671 } 2672 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2673 getContext()), 2674 S, Parser.getTok().getLoc())); 2675 return MatchOperand_Success; 2676} 2677 2678/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2679/// instructions. Legal values are: 2680/// lsl #n 'n' in [0,31] 2681/// asr #n 'n' in [1,32] 2682/// n == 32 encoded as n == 0. 2683ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2684parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2685 const AsmToken &Tok = Parser.getTok(); 2686 SMLoc S = Tok.getLoc(); 2687 if (Tok.isNot(AsmToken::Identifier)) { 2688 Error(S, "shift operator 'asr' or 'lsl' expected"); 2689 return MatchOperand_ParseFail; 2690 } 2691 StringRef ShiftName = Tok.getString(); 2692 bool isASR; 2693 if (ShiftName == "lsl" || ShiftName == "LSL") 2694 isASR = false; 2695 else if (ShiftName == "asr" || ShiftName == "ASR") 2696 isASR = true; 2697 else { 2698 Error(S, "shift operator 'asr' or 'lsl' expected"); 2699 return MatchOperand_ParseFail; 2700 } 2701 Parser.Lex(); // Eat the operator. 2702 2703 // A '#' and a shift amount. 
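  // For example, given "ssat r0, #8, r1, lsl #4", the "lsl" has just been
  // consumed and "#4" should follow; "asr #32" is accepted and encoded as 0
  // per the rule above (and rejected in Thumb mode below).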
2704   if (Parser.getTok().isNot(AsmToken::Hash)) { 2705     Error(Parser.getTok().getLoc(), "'#' expected"); 2706     return MatchOperand_ParseFail; 2707   } 2708   Parser.Lex(); // Eat hash token. 2709 2710   const MCExpr *ShiftAmount; 2711   SMLoc E = Parser.getTok().getLoc(); 2712   if (getParser().ParseExpression(ShiftAmount)) { 2713     Error(E, "malformed shift expression"); 2714     return MatchOperand_ParseFail; 2715   } 2716   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2717   if (!CE) { 2718     Error(E, "shift amount must be an immediate"); 2719     return MatchOperand_ParseFail; 2720   } 2721 2722   int64_t Val = CE->getValue(); 2723   if (isASR) { 2724     // Shift amount must be in [1,32] 2725     if (Val < 1 || Val > 32) { 2726       Error(E, "'asr' shift amount must be in range [1,32]"); 2727       return MatchOperand_ParseFail; 2728     } 2729     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 2730     if (isThumb() && Val == 32) { 2731       Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2732       return MatchOperand_ParseFail; 2733     } 2734     if (Val == 32) Val = 0; 2735   } else { 2736     // Shift amount must be in [0,31] 2737     if (Val < 0 || Val > 31) { 2738       Error(E, "'lsl' shift amount must be in range [0,31]"); 2739       return MatchOperand_ParseFail; 2740     } 2741   } 2742 2743   E = Parser.getTok().getLoc(); 2744   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2745 2746   return MatchOperand_Success; 2747} 2748 2749/// parseRotImm - Parse the rotate immediate operand for SXTB/UXTB family 2750/// of instructions. Legal values are: 2751///     ror #n  'n' in {0, 8, 16, 24} 2752ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2753parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2754   const AsmToken &Tok = Parser.getTok(); 2755   SMLoc S = Tok.getLoc(); 2756   if (Tok.isNot(AsmToken::Identifier)) 2757     return MatchOperand_NoMatch; 2758   StringRef ShiftName = Tok.getString(); 2759   if (ShiftName != "ror" && ShiftName != "ROR") 2760     return MatchOperand_NoMatch; 2761   Parser.Lex(); // Eat the operator. 2762 2763   // A '#' and a rotate amount. 2764   if (Parser.getTok().isNot(AsmToken::Hash)) { 2765     Error(Parser.getTok().getLoc(), "'#' expected"); 2766     return MatchOperand_ParseFail; 2767   } 2768   Parser.Lex(); // Eat hash token. 2769 2770   const MCExpr *ShiftAmount; 2771   SMLoc E = Parser.getTok().getLoc(); 2772   if (getParser().ParseExpression(ShiftAmount)) { 2773     Error(E, "malformed rotate expression"); 2774     return MatchOperand_ParseFail; 2775   } 2776   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2777   if (!CE) { 2778     Error(E, "rotate amount must be an immediate"); 2779     return MatchOperand_ParseFail; 2780   } 2781 2782   int64_t Val = CE->getValue(); 2783   // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension; 2784   // normally, zero is represented in asm by omitting the rotate operand 2785   // entirely). 2786   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2787     Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2788     return MatchOperand_ParseFail; 2789   } 2790 2791   E = Parser.getTok().getLoc(); 2792   Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2793 2794   return MatchOperand_Success; 2795} 2796 2797ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2798parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2799   SMLoc S = Parser.getTok().getLoc(); 2800   // The bitfield descriptor is really two operands, the LSB and the width.
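  // For example, the "#8, #4" in "bfi r0, r1, #8, #4" parses as lsb = 8 and
  // width = 4; both are folded into a single bitfield operand below.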
2801   if (Parser.getTok().isNot(AsmToken::Hash)) { 2802     Error(Parser.getTok().getLoc(), "'#' expected"); 2803     return MatchOperand_ParseFail; 2804   } 2805   Parser.Lex(); // Eat hash token. 2806 2807   const MCExpr *LSBExpr; 2808   SMLoc E = Parser.getTok().getLoc(); 2809   if (getParser().ParseExpression(LSBExpr)) { 2810     Error(E, "malformed immediate expression"); 2811     return MatchOperand_ParseFail; 2812   } 2813   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2814   if (!CE) { 2815     Error(E, "'lsb' operand must be an immediate"); 2816     return MatchOperand_ParseFail; 2817   } 2818 2819   int64_t LSB = CE->getValue(); 2820   // The LSB must be in the range [0,31] 2821   if (LSB < 0 || LSB > 31) { 2822     Error(E, "'lsb' operand must be in the range [0,31]"); 2823     return MatchOperand_ParseFail; 2824   } 2825   E = Parser.getTok().getLoc(); 2826 2827   // Expect another immediate operand. 2828   if (Parser.getTok().isNot(AsmToken::Comma)) { 2829     Error(Parser.getTok().getLoc(), "too few operands"); 2830     return MatchOperand_ParseFail; 2831   } 2832   Parser.Lex(); // Eat comma token. 2833   if (Parser.getTok().isNot(AsmToken::Hash)) { 2834     Error(Parser.getTok().getLoc(), "'#' expected"); 2835     return MatchOperand_ParseFail; 2836   } 2837   Parser.Lex(); // Eat hash token. 2838 2839   const MCExpr *WidthExpr; 2840   if (getParser().ParseExpression(WidthExpr)) { 2841     Error(E, "malformed immediate expression"); 2842     return MatchOperand_ParseFail; 2843   } 2844   CE = dyn_cast<MCConstantExpr>(WidthExpr); 2845   if (!CE) { 2846     Error(E, "'width' operand must be an immediate"); 2847     return MatchOperand_ParseFail; 2848   } 2849 2850   int64_t Width = CE->getValue(); 2851   // The width must be in the range [1,32-lsb] 2852   if (Width < 1 || Width > 32 - LSB) { 2853     Error(E, "'width' operand must be in the range [1,32-lsb]"); 2854     return MatchOperand_ParseFail; 2855   } 2856   E = Parser.getTok().getLoc(); 2857 2858   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2859 2860   return MatchOperand_Success; 2861} 2862 2863ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2864parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2865   // Check for a post-index addressing register operand. Specifically: 2866   //  postidx_reg := '+' register {, shift} 2867   //               | '-' register {, shift} 2868   //               | register {, shift} 2869 2870   // This method must return MatchOperand_NoMatch without consuming any tokens 2871   // in the case where there is no match, as other alternatives take other 2872   // parse methods. 2873   AsmToken Tok = Parser.getTok(); 2874   SMLoc S = Tok.getLoc(); 2875   bool haveEaten = false; 2876   bool isAdd = true; 2877   int Reg = -1; 2878   if (Tok.is(AsmToken::Plus)) { 2879     Parser.Lex(); // Eat the '+' token. 2880     haveEaten = true; 2881   } else if (Tok.is(AsmToken::Minus)) { 2882     Parser.Lex(); // Eat the '-' token. 2883     isAdd = false; 2884     haveEaten = true; 2885   } 2886   if (Parser.getTok().is(AsmToken::Identifier)) 2887     Reg = tryParseRegister(); 2888   if (Reg == -1) { 2889     if (!haveEaten) 2890       return MatchOperand_NoMatch; 2891     Error(Parser.getTok().getLoc(), "register expected"); 2892     return MatchOperand_ParseFail; 2893   } 2894   SMLoc E = Parser.getTok().getLoc(); 2895 2896   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2897   unsigned ShiftImm = 0; 2898   if (Parser.getTok().is(AsmToken::Comma)) { 2899     Parser.Lex(); // Eat the ','.
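    // For example, the "lsl #2" in the post-indexed "ldr r1, [r0], r2, lsl #2".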
2900 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2901 return MatchOperand_ParseFail; 2902 } 2903 2904 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2905 ShiftImm, S, E)); 2906 2907 return MatchOperand_Success; 2908} 2909 2910ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2911parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2912 // Check for a post-index addressing register operand. Specifically: 2913 // am3offset := '+' register 2914 // | '-' register 2915 // | register 2916 // | # imm 2917 // | # + imm 2918 // | # - imm 2919 2920 // This method must return MatchOperand_NoMatch without consuming any tokens 2921 // in the case where there is no match, as other alternatives take other 2922 // parse methods. 2923 AsmToken Tok = Parser.getTok(); 2924 SMLoc S = Tok.getLoc(); 2925 2926 // Do immediates first, as we always parse those if we have a '#'. 2927 if (Parser.getTok().is(AsmToken::Hash)) { 2928 Parser.Lex(); // Eat the '#'. 2929 // Explicitly look for a '-', as we need to encode negative zero 2930 // differently. 2931 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2932 const MCExpr *Offset; 2933 if (getParser().ParseExpression(Offset)) 2934 return MatchOperand_ParseFail; 2935 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2936 if (!CE) { 2937 Error(S, "constant expression expected"); 2938 return MatchOperand_ParseFail; 2939 } 2940 SMLoc E = Tok.getLoc(); 2941 // Negative zero is encoded as the flag value INT32_MIN. 2942 int32_t Val = CE->getValue(); 2943 if (isNegative && Val == 0) 2944 Val = INT32_MIN; 2945 2946 Operands.push_back( 2947 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2948 2949 return MatchOperand_Success; 2950 } 2951 2952 2953 bool haveEaten = false; 2954 bool isAdd = true; 2955 int Reg = -1; 2956 if (Tok.is(AsmToken::Plus)) { 2957 Parser.Lex(); // Eat the '+' token. 2958 haveEaten = true; 2959 } else if (Tok.is(AsmToken::Minus)) { 2960 Parser.Lex(); // Eat the '-' token. 2961 isAdd = false; 2962 haveEaten = true; 2963 } 2964 if (Parser.getTok().is(AsmToken::Identifier)) 2965 Reg = tryParseRegister(); 2966 if (Reg == -1) { 2967 if (!haveEaten) 2968 return MatchOperand_NoMatch; 2969 Error(Parser.getTok().getLoc(), "register expected"); 2970 return MatchOperand_ParseFail; 2971 } 2972 SMLoc E = Parser.getTok().getLoc(); 2973 2974 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 2975 0, S, E)); 2976 2977 return MatchOperand_Success; 2978} 2979 2980/// cvtT2LdrdPre - Convert parsed operands to MCInst. 2981/// Needed here because the Asm Gen Matcher can't handle properly tied operands 2982/// when they refer multiple MIOperands inside a single one. 2983bool ARMAsmParser:: 2984cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 2985 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2986 // Rt, Rt2 2987 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2988 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 2989 // Create a writeback register dummy placeholder. 2990 Inst.addOperand(MCOperand::CreateReg(0)); 2991 // addr 2992 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 2993 // pred 2994 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2995 return true; 2996} 2997 2998/// cvtT2StrdPre - Convert parsed operands to MCInst. 2999/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3000/// when they refer multiple MIOperands inside a single one. 
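/// For example, "strd r0, r1, [r2, #-8]!" produces the writeback dummy, then
/// Rt/Rt2, the imm8s4-offset address, and the predicate, in the order built
/// below.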
3001bool ARMAsmParser:: 3002cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3003 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3004 // Create a writeback register dummy placeholder. 3005 Inst.addOperand(MCOperand::CreateReg(0)); 3006 // Rt, Rt2 3007 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3008 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3009 // addr 3010 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3011 // pred 3012 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3013 return true; 3014} 3015 3016/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3017/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3018/// when they refer multiple MIOperands inside a single one. 3019bool ARMAsmParser:: 3020cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3021 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3022 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3023 3024 // Create a writeback register dummy placeholder. 3025 Inst.addOperand(MCOperand::CreateImm(0)); 3026 3027 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3028 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3029 return true; 3030} 3031 3032/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3033/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3034/// when they refer multiple MIOperands inside a single one. 3035bool ARMAsmParser:: 3036cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3037 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3038 // Create a writeback register dummy placeholder. 3039 Inst.addOperand(MCOperand::CreateImm(0)); 3040 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3041 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3042 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3043 return true; 3044} 3045 3046/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3047/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3048/// when they refer multiple MIOperands inside a single one. 3049bool ARMAsmParser:: 3050cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3051 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3052 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3053 3054 // Create a writeback register dummy placeholder. 3055 Inst.addOperand(MCOperand::CreateImm(0)); 3056 3057 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3058 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3059 return true; 3060} 3061 3062/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3063/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3064/// when they refer multiple MIOperands inside a single one. 3065bool ARMAsmParser:: 3066cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3067 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3068 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3069 3070 // Create a writeback register dummy placeholder. 3071 Inst.addOperand(MCOperand::CreateImm(0)); 3072 3073 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3074 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3075 return true; 3076} 3077 3078 3079/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3080/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3081/// when they refer multiple MIOperands inside a single one. 3082bool ARMAsmParser:: 3083cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3084 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3085 // Create a writeback register dummy placeholder. 3086 Inst.addOperand(MCOperand::CreateImm(0)); 3087 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3088 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3089 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3090 return true; 3091} 3092 3093/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3094/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3095/// when they refer multiple MIOperands inside a single one. 3096bool ARMAsmParser:: 3097cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3098 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3099 // Create a writeback register dummy placeholder. 3100 Inst.addOperand(MCOperand::CreateImm(0)); 3101 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3102 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3103 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3104 return true; 3105} 3106 3107/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3108/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3109/// when they refer multiple MIOperands inside a single one. 3110bool ARMAsmParser:: 3111cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3112 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3113 // Create a writeback register dummy placeholder. 3114 Inst.addOperand(MCOperand::CreateImm(0)); 3115 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3116 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3117 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3118 return true; 3119} 3120 3121/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3122/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3123/// when they refer multiple MIOperands inside a single one. 3124bool ARMAsmParser:: 3125cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3126 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3127 // Rt 3128 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3129 // Create a writeback register dummy placeholder. 3130 Inst.addOperand(MCOperand::CreateImm(0)); 3131 // addr 3132 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3133 // offset 3134 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3135 // pred 3136 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3137 return true; 3138} 3139 3140/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3141/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3142/// when they refer multiple MIOperands inside a single one. 3143bool ARMAsmParser:: 3144cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3145 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3146 // Rt 3147 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3148 // Create a writeback register dummy placeholder. 
3149 Inst.addOperand(MCOperand::CreateImm(0)); 3150 // addr 3151 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3152 // offset 3153 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3154 // pred 3155 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3156 return true; 3157} 3158 3159/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 3160/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3161/// when they refer multiple MIOperands inside a single one. 3162bool ARMAsmParser:: 3163cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3164 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3165 // Create a writeback register dummy placeholder. 3166 Inst.addOperand(MCOperand::CreateImm(0)); 3167 // Rt 3168 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3169 // addr 3170 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3171 // offset 3172 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3173 // pred 3174 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3175 return true; 3176} 3177 3178/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3179/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3180/// when they refer multiple MIOperands inside a single one. 3181bool ARMAsmParser:: 3182cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3183 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3184 // Create a writeback register dummy placeholder. 3185 Inst.addOperand(MCOperand::CreateImm(0)); 3186 // Rt 3187 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3188 // addr 3189 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3190 // offset 3191 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3192 // pred 3193 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3194 return true; 3195} 3196 3197/// cvtLdrdPre - Convert parsed operands to MCInst. 3198/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3199/// when they refer multiple MIOperands inside a single one. 3200bool ARMAsmParser:: 3201cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3202 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3203 // Rt, Rt2 3204 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3205 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3206 // Create a writeback register dummy placeholder. 3207 Inst.addOperand(MCOperand::CreateImm(0)); 3208 // addr 3209 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3210 // pred 3211 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3212 return true; 3213} 3214 3215/// cvtStrdPre - Convert parsed operands to MCInst. 3216/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3217/// when they refer multiple MIOperands inside a single one. 3218bool ARMAsmParser:: 3219cvtStrdPre(MCInst &Inst, unsigned Opcode, 3220 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3221 // Create a writeback register dummy placeholder. 3222 Inst.addOperand(MCOperand::CreateImm(0)); 3223 // Rt, Rt2 3224 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3225 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3226 // addr 3227 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3228 // pred 3229 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3230 return true; 3231} 3232 3233/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 
3234/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3235/// when they refer multiple MIOperands inside a single one. 3236bool ARMAsmParser:: 3237cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3238 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3239 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3240 // Create a writeback register dummy placeholder. 3241 Inst.addOperand(MCOperand::CreateImm(0)); 3242 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3243 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3244 return true; 3245} 3246 3247/// cvtThumbMultiple- Convert parsed operands to MCInst. 3248/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3249/// when they refer multiple MIOperands inside a single one. 3250bool ARMAsmParser:: 3251cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3252 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3253 // The second source operand must be the same register as the destination 3254 // operand. 3255 if (Operands.size() == 6 && 3256 (((ARMOperand*)Operands[3])->getReg() != 3257 ((ARMOperand*)Operands[5])->getReg()) && 3258 (((ARMOperand*)Operands[3])->getReg() != 3259 ((ARMOperand*)Operands[4])->getReg())) { 3260 Error(Operands[3]->getStartLoc(), 3261 "destination register must match source register"); 3262 return false; 3263 } 3264 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3265 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3266 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3267 // If we have a three-operand form, use that, else the second source operand 3268 // is just the destination operand again. 3269 if (Operands.size() == 6) 3270 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3271 else 3272 Inst.addOperand(Inst.getOperand(0)); 3273 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3274 3275 return true; 3276} 3277 3278/// Parse an ARM memory expression, return false if successful else return true 3279/// or an error. The first token must be a '[' when called. 3280bool ARMAsmParser:: 3281parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3282 SMLoc S, E; 3283 assert(Parser.getTok().is(AsmToken::LBrac) && 3284 "Token is not a Left Bracket"); 3285 S = Parser.getTok().getLoc(); 3286 Parser.Lex(); // Eat left bracket token. 3287 3288 const AsmToken &BaseRegTok = Parser.getTok(); 3289 int BaseRegNum = tryParseRegister(); 3290 if (BaseRegNum == -1) 3291 return Error(BaseRegTok.getLoc(), "register expected"); 3292 3293 // The next token must either be a comma or a closing bracket. 3294 const AsmToken &Tok = Parser.getTok(); 3295 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3296 return Error(Tok.getLoc(), "malformed memory operand"); 3297 3298 if (Tok.is(AsmToken::RBrac)) { 3299 E = Tok.getLoc(); 3300 Parser.Lex(); // Eat right bracket token. 3301 3302 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3303 0, 0, false, S, E)); 3304 3305 // If there's a pre-indexing writeback marker, '!', just add it as a token 3306 // operand. It's rather odd, but syntactically valid. 3307 if (Parser.getTok().is(AsmToken::Exclaim)) { 3308 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3309 Parser.Lex(); // Eat the '!'. 3310 } 3311 3312 return false; 3313 } 3314 3315 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3316 Parser.Lex(); // Eat the comma. 3317 3318 // If we have a ':', it's an alignment specifier. 
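  // For example, the ":128" in "vld1.64 {d16, d17}, [r0, :128]"; only 64,
  // 128, and 256 are accepted, recorded below as an alignment in bytes.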
3319 if (Parser.getTok().is(AsmToken::Colon)) { 3320 Parser.Lex(); // Eat the ':'. 3321 E = Parser.getTok().getLoc(); 3322 3323 const MCExpr *Expr; 3324 if (getParser().ParseExpression(Expr)) 3325 return true; 3326 3327 // The expression has to be a constant. Memory references with relocations 3328 // don't come through here, as they use the <label> forms of the relevant 3329 // instructions. 3330 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3331 if (!CE) 3332 return Error (E, "constant expression expected"); 3333 3334 unsigned Align = 0; 3335 switch (CE->getValue()) { 3336 default: 3337 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3338 case 64: Align = 8; break; 3339 case 128: Align = 16; break; 3340 case 256: Align = 32; break; 3341 } 3342 3343 // Now we should have the closing ']' 3344 E = Parser.getTok().getLoc(); 3345 if (Parser.getTok().isNot(AsmToken::RBrac)) 3346 return Error(E, "']' expected"); 3347 Parser.Lex(); // Eat right bracket token. 3348 3349 // Don't worry about range checking the value here. That's handled by 3350 // the is*() predicates. 3351 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3352 ARM_AM::no_shift, 0, Align, 3353 false, S, E)); 3354 3355 // If there's a pre-indexing writeback marker, '!', just add it as a token 3356 // operand. 3357 if (Parser.getTok().is(AsmToken::Exclaim)) { 3358 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3359 Parser.Lex(); // Eat the '!'. 3360 } 3361 3362 return false; 3363 } 3364 3365 // If we have a '#', it's an immediate offset, else assume it's a register 3366 // offset. 3367 if (Parser.getTok().is(AsmToken::Hash)) { 3368 Parser.Lex(); // Eat the '#'. 3369 E = Parser.getTok().getLoc(); 3370 3371 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3372 const MCExpr *Offset; 3373 if (getParser().ParseExpression(Offset)) 3374 return true; 3375 3376 // The expression has to be a constant. Memory references with relocations 3377 // don't come through here, as they use the <label> forms of the relevant 3378 // instructions. 3379 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3380 if (!CE) 3381 return Error (E, "constant expression expected"); 3382 3383 // If the constant was #-0, represent it as INT32_MIN. 3384 int32_t Val = CE->getValue(); 3385 if (isNegative && Val == 0) 3386 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3387 3388 // Now we should have the closing ']' 3389 E = Parser.getTok().getLoc(); 3390 if (Parser.getTok().isNot(AsmToken::RBrac)) 3391 return Error(E, "']' expected"); 3392 Parser.Lex(); // Eat right bracket token. 3393 3394 // Don't worry about range checking the value here. That's handled by 3395 // the is*() predicates. 3396 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3397 ARM_AM::no_shift, 0, 0, 3398 false, S, E)); 3399 3400 // If there's a pre-indexing writeback marker, '!', just add it as a token 3401 // operand. 3402 if (Parser.getTok().is(AsmToken::Exclaim)) { 3403 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3404 Parser.Lex(); // Eat the '!'. 3405 } 3406 3407 return false; 3408 } 3409 3410 // The register offset is optionally preceded by a '+' or '-' 3411 bool isNegative = false; 3412 if (Parser.getTok().is(AsmToken::Minus)) { 3413 isNegative = true; 3414 Parser.Lex(); // Eat the '-'. 3415 } else if (Parser.getTok().is(AsmToken::Plus)) { 3416 // Nothing to do. 3417 Parser.Lex(); // Eat the '+'. 
3418 } 3419 3420 E = Parser.getTok().getLoc(); 3421 int OffsetRegNum = tryParseRegister(); 3422 if (OffsetRegNum == -1) 3423 return Error(E, "register expected"); 3424 3425 // If there's a shift operator, handle it. 3426 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3427 unsigned ShiftImm = 0; 3428 if (Parser.getTok().is(AsmToken::Comma)) { 3429 Parser.Lex(); // Eat the ','. 3430 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3431 return true; 3432 } 3433 3434 // Now we should have the closing ']' 3435 E = Parser.getTok().getLoc(); 3436 if (Parser.getTok().isNot(AsmToken::RBrac)) 3437 return Error(E, "']' expected"); 3438 Parser.Lex(); // Eat right bracket token. 3439 3440 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3441 ShiftType, ShiftImm, 0, isNegative, 3442 S, E)); 3443 3444 // If there's a pre-indexing writeback marker, '!', just add it as a token 3445 // operand. 3446 if (Parser.getTok().is(AsmToken::Exclaim)) { 3447 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3448 Parser.Lex(); // Eat the '!'. 3449 } 3450 3451 return false; 3452} 3453 3454/// parseMemRegOffsetShift - one of these two: 3455/// ( lsl | lsr | asr | ror ) , # shift_amount 3456/// rrx 3457/// return true if it parses a shift otherwise it returns false. 3458bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3459 unsigned &Amount) { 3460 SMLoc Loc = Parser.getTok().getLoc(); 3461 const AsmToken &Tok = Parser.getTok(); 3462 if (Tok.isNot(AsmToken::Identifier)) 3463 return true; 3464 StringRef ShiftName = Tok.getString(); 3465 if (ShiftName == "lsl" || ShiftName == "LSL") 3466 St = ARM_AM::lsl; 3467 else if (ShiftName == "lsr" || ShiftName == "LSR") 3468 St = ARM_AM::lsr; 3469 else if (ShiftName == "asr" || ShiftName == "ASR") 3470 St = ARM_AM::asr; 3471 else if (ShiftName == "ror" || ShiftName == "ROR") 3472 St = ARM_AM::ror; 3473 else if (ShiftName == "rrx" || ShiftName == "RRX") 3474 St = ARM_AM::rrx; 3475 else 3476 return Error(Loc, "illegal shift operator"); 3477 Parser.Lex(); // Eat shift type token. 3478 3479 // rrx stands alone. 3480 Amount = 0; 3481 if (St != ARM_AM::rrx) { 3482 Loc = Parser.getTok().getLoc(); 3483 // A '#' and a shift amount. 3484 const AsmToken &HashTok = Parser.getTok(); 3485 if (HashTok.isNot(AsmToken::Hash)) 3486 return Error(HashTok.getLoc(), "'#' expected"); 3487 Parser.Lex(); // Eat hash token. 3488 3489 const MCExpr *Expr; 3490 if (getParser().ParseExpression(Expr)) 3491 return true; 3492 // Range check the immediate. 3493 // lsl, ror: 0 <= imm <= 31 3494 // lsr, asr: 0 <= imm <= 32 3495 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3496 if (!CE) 3497 return Error(Loc, "shift amount must be an immediate"); 3498 int64_t Imm = CE->getValue(); 3499 if (Imm < 0 || 3500 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3501 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3502 return Error(Loc, "immediate shift value out of range"); 3503 Amount = Imm; 3504 } 3505 3506 return false; 3507} 3508 3509/// parseFPImm - A floating point immediate expression operand. 3510ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3511parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3512 SMLoc S = Parser.getTok().getLoc(); 3513 3514 if (Parser.getTok().isNot(AsmToken::Hash)) 3515 return MatchOperand_NoMatch; 3516 3517 // Disambiguate the VMOV forms that can accept an FP immediate. 
3518 // vmov.f32 <sreg>, #imm 3519 // vmov.f64 <dreg>, #imm 3520 // vmov.f32 <dreg>, #imm @ vector f32x2 3521 // vmov.f32 <qreg>, #imm @ vector f32x4 3522 // 3523 // There are also the NEON VMOV instructions which expect an 3524 // integer constant. Make sure we don't try to parse an FPImm 3525 // for these: 3526 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3527 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3528 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3529 TyOp->getToken() != ".f64")) 3530 return MatchOperand_NoMatch; 3531 3532 Parser.Lex(); // Eat the '#'. 3533 3534 // Handle negation, as that still comes through as a separate token. 3535 bool isNegative = false; 3536 if (Parser.getTok().is(AsmToken::Minus)) { 3537 isNegative = true; 3538 Parser.Lex(); 3539 } 3540 const AsmToken &Tok = Parser.getTok(); 3541 if (Tok.is(AsmToken::Real)) { 3542 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3543 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3544 // If we had a '-' in front, toggle the sign bit. 3545 IntVal ^= (uint64_t)isNegative << 63; 3546 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3547 Parser.Lex(); // Eat the token. 3548 if (Val == -1) { 3549 TokError("floating point value out of range"); 3550 return MatchOperand_ParseFail; 3551 } 3552 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3553 return MatchOperand_Success; 3554 } 3555 if (Tok.is(AsmToken::Integer)) { 3556 int64_t Val = Tok.getIntVal(); 3557 Parser.Lex(); // Eat the token. 3558 if (Val > 255 || Val < 0) { 3559 TokError("encoded floating point value out of range"); 3560 return MatchOperand_ParseFail; 3561 } 3562 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3563 return MatchOperand_Success; 3564 } 3565 3566 TokError("invalid floating point immediate"); 3567 return MatchOperand_ParseFail; 3568} 3569/// Parse a arm instruction operand. For now this parses the operand regardless 3570/// of the mnemonic. 3571bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3572 StringRef Mnemonic) { 3573 SMLoc S, E; 3574 3575 // Check if the current operand has a custom associated parser, if so, try to 3576 // custom parse the operand, or fallback to the general approach. 3577 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3578 if (ResTy == MatchOperand_Success) 3579 return false; 3580 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3581 // there was a match, but an error occurred, in which case, just return that 3582 // the operand parsing failed. 3583 if (ResTy == MatchOperand_ParseFail) 3584 return true; 3585 3586 switch (getLexer().getKind()) { 3587 default: 3588 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3589 return true; 3590 case AsmToken::Identifier: { 3591 // If this is VMRS, check for the apsr_nzcv operand. 3592 if (!tryParseRegisterWithWriteBack(Operands)) 3593 return false; 3594 int Res = tryParseShiftRegister(Operands); 3595 if (Res == 0) // success 3596 return false; 3597 else if (Res == -1) // irrecoverable error 3598 return true; 3599 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3600 S = Parser.getTok().getLoc(); 3601 Parser.Lex(); 3602 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3603 return false; 3604 } 3605 3606 // Fall though for the Identifier case that is not a register or a 3607 // special name. 
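 // Illustrative example (hypothetical input): for "b foo", the identifier
 // "foo" is neither a register nor a shifted-register form nor the special
 // "apsr_nzcv" name, so we fall through to the expression handling below
 // and "foo" becomes an immediate (label) operand.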
3608 } 3609 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3610 case AsmToken::Dot: { // . as a branch target 3611 // This was not a register so parse other operands that start with an 3612 // identifier (like labels) as expressions and create them as immediates. 3613 const MCExpr *IdVal; 3614 S = Parser.getTok().getLoc(); 3615 if (getParser().ParseExpression(IdVal)) 3616 return true; 3617 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3618 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3619 return false; 3620 } 3621 case AsmToken::LBrac: 3622 return parseMemory(Operands); 3623 case AsmToken::LCurly: 3624 return parseRegisterList(Operands); 3625 case AsmToken::Hash: { 3626 // #42 -> immediate. 3627 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3628 S = Parser.getTok().getLoc(); 3629 Parser.Lex(); 3630 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3631 const MCExpr *ImmVal; 3632 if (getParser().ParseExpression(ImmVal)) 3633 return true; 3634 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3635 if (!CE) { 3636 Error(S, "constant expression expected"); 3637 return MatchOperand_ParseFail; 3638 } 3639 int32_t Val = CE->getValue(); 3640 if (isNegative && Val == 0) 3641 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3642 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3643 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3644 return false; 3645 } 3646 case AsmToken::Colon: { 3647 // ":lower16:" and ":upper16:" expression prefixes 3648 // FIXME: Check it's an expression prefix, 3649 // e.g. (FOO - :lower16:BAR) isn't legal. 3650 ARMMCExpr::VariantKind RefKind; 3651 if (parsePrefix(RefKind)) 3652 return true; 3653 3654 const MCExpr *SubExprVal; 3655 if (getParser().ParseExpression(SubExprVal)) 3656 return true; 3657 3658 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3659 getContext()); 3660 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3661 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3662 return false; 3663 } 3664 } 3665} 3666 3667// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3668// :lower16: and :upper16:. 3669bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 3670 RefKind = ARMMCExpr::VK_ARM_None; 3671 3672 // :lower16: and :upper16: modifiers 3673 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 3674 Parser.Lex(); // Eat ':' 3675 3676 if (getLexer().isNot(AsmToken::Identifier)) { 3677 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 3678 return true; 3679 } 3680 3681 StringRef IDVal = Parser.getTok().getIdentifier(); 3682 if (IDVal == "lower16") { 3683 RefKind = ARMMCExpr::VK_ARM_LO16; 3684 } else if (IDVal == "upper16") { 3685 RefKind = ARMMCExpr::VK_ARM_HI16; 3686 } else { 3687 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 3688 return true; 3689 } 3690 Parser.Lex(); 3691 3692 if (getLexer().isNot(AsmToken::Colon)) { 3693 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 3694 return true; 3695 } 3696 Parser.Lex(); // Eat the last ':' 3697 return false; 3698} 3699 3700/// \brief Given a mnemonic, split out possible predication code and carry 3701/// setting letters to form a canonical mnemonic and flags. 3702// 3703// FIXME: Would be nice to autogen this. 3704// FIXME: This is a bit of a maze of special cases. 
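// Illustrative examples (hypothetical inputs, assuming ARM mode):
//   "subseq" -> Mnemonic "sub", PredicationCode ARMCC::EQ, CarrySetting true
//   "movhi"  -> Mnemonic "mov", PredicationCode ARMCC::HI, CarrySetting false
//   "cpsie"  -> Mnemonic "cps", ProcessorIMod ARM_PROC::IE
//   "itte"   -> Mnemonic "it",  ITMask "te"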
3705StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, 3706 unsigned &PredicationCode, 3707 bool &CarrySetting, 3708 unsigned &ProcessorIMod, 3709 StringRef &ITMask) { 3710 PredicationCode = ARMCC::AL; 3711 CarrySetting = false; 3712 ProcessorIMod = 0; 3713 3714 // Ignore some mnemonics we know aren't predicated forms. 3715 // 3716 // FIXME: Would be nice to autogen this. 3717 if ((Mnemonic == "movs" && isThumb()) || 3718 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || 3719 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || 3720 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || 3721 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || 3722 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || 3723 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || 3724 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal") 3725 return Mnemonic; 3726 3727 // First, split out any predication code. Ignore mnemonics we know aren't 3728 // predicated but do have a carry-set and so weren't caught above. 3729 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && 3730 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && 3731 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && 3732 Mnemonic != "sbcs" && Mnemonic != "rscs") { 3733 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) 3734 .Case("eq", ARMCC::EQ) 3735 .Case("ne", ARMCC::NE) 3736 .Case("hs", ARMCC::HS) 3737 .Case("cs", ARMCC::HS) 3738 .Case("lo", ARMCC::LO) 3739 .Case("cc", ARMCC::LO) 3740 .Case("mi", ARMCC::MI) 3741 .Case("pl", ARMCC::PL) 3742 .Case("vs", ARMCC::VS) 3743 .Case("vc", ARMCC::VC) 3744 .Case("hi", ARMCC::HI) 3745 .Case("ls", ARMCC::LS) 3746 .Case("ge", ARMCC::GE) 3747 .Case("lt", ARMCC::LT) 3748 .Case("gt", ARMCC::GT) 3749 .Case("le", ARMCC::LE) 3750 .Case("al", ARMCC::AL) 3751 .Default(~0U); 3752 if (CC != ~0U) { 3753 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); 3754 PredicationCode = CC; 3755 } 3756 } 3757 3758 // Next, determine if we have a carry setting bit. We explicitly ignore all 3759 // the instructions we know end in 's'. 3760 if (Mnemonic.endswith("s") && 3761 !(Mnemonic == "cps" || Mnemonic == "mls" || 3762 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || 3763 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || 3764 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || 3765 Mnemonic == "vrsqrts" || Mnemonic == "srs" || 3766 (Mnemonic == "movs" && isThumb()))) { 3767 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); 3768 CarrySetting = true; 3769 } 3770 3771 // The "cps" instruction can have a interrupt mode operand which is glued into 3772 // the mnemonic. Check if this is the case, split it and parse the imod op 3773 if (Mnemonic.startswith("cps")) { 3774 // Split out any imod code. 3775 unsigned IMod = 3776 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 3777 .Case("ie", ARM_PROC::IE) 3778 .Case("id", ARM_PROC::ID) 3779 .Default(~0U); 3780 if (IMod != ~0U) { 3781 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 3782 ProcessorIMod = IMod; 3783 } 3784 } 3785 3786 // The "it" instruction has the condition mask on the end of the mnemonic. 
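  // Illustrative example (hypothetical input): "ittet" splits into Mnemonic
  // "it" and ITMask "tet". ParseInstruction later folds that mask into a
  // 4-bit value (for "tet" the loop there produces 0b1011), which the t2IT
  // handling in processInstruction may still adjust for conditions whose
  // low bit is zero.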
3787 if (Mnemonic.startswith("it")) { 3788 ITMask = Mnemonic.slice(2, Mnemonic.size()); 3789 Mnemonic = Mnemonic.slice(0, 2); 3790 } 3791 3792 return Mnemonic; 3793} 3794 3795/// \brief Given a canonical mnemonic, determine if the instruction ever allows 3796/// inclusion of carry set or predication code operands. 3797// 3798// FIXME: It would be nice to autogen this. 3799void ARMAsmParser:: 3800getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 3801 bool &CanAcceptPredicationCode) { 3802 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 3803 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 3804 Mnemonic == "add" || Mnemonic == "adc" || 3805 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 3806 Mnemonic == "orr" || Mnemonic == "mvn" || 3807 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 3808 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 3809 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 3810 Mnemonic == "mla" || Mnemonic == "smlal" || 3811 Mnemonic == "umlal" || Mnemonic == "umull"))) { 3812 CanAcceptCarrySet = true; 3813 } else 3814 CanAcceptCarrySet = false; 3815 3816 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 3817 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 3818 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 3819 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 3820 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 3821 (Mnemonic == "clrex" && !isThumb()) || 3822 (Mnemonic == "nop" && isThumbOne()) || 3823 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 3824 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 3825 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 3826 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 3827 !isThumb()) || 3828 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 3829 CanAcceptPredicationCode = false; 3830 } else 3831 CanAcceptPredicationCode = true; 3832 3833 if (isThumb()) { 3834 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 3835 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 3836 CanAcceptPredicationCode = false; 3837 } 3838} 3839 3840bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 3841 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3842 // FIXME: This is all horribly hacky. We really need a better way to deal 3843 // with optional operands like this in the matcher table. 3844 3845 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 3846 // another does not. Specifically, the MOVW instruction does not. So we 3847 // special case it here and remove the defaulted (non-setting) cc_out 3848 // operand if that's the instruction we're trying to match. 3849 // 3850 // We do this as post-processing of the explicit operands rather than just 3851 // conditionally adding the cc_out in the first place because we need 3852 // to check the type of the parsed immediate operand. 3853 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 3854 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 3855 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 3856 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3857 return true; 3858 3859 // Register-register 'add' for thumb does not have a cc_out operand 3860 // when there are only two register operands. 
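  // Illustrative example (hypothetical input): in Thumb mode "add r0, r1"
  // parses as { "add", cc_out(0), pred(AL), r0, r1 }, i.e. five operands
  // with a defaulted cc_out, and the check below drops that cc_out because
  // the two-register ADD encoding never sets the flags.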
3861 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 3862 static_cast<ARMOperand*>(Operands[3])->isReg() && 3863 static_cast<ARMOperand*>(Operands[4])->isReg() && 3864 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3865 return true; 3866 // Register-register 'add' for thumb does not have a cc_out operand 3867 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 3868 // have to check the immediate range here since Thumb2 has a variant 3869 // that can handle a different range and has a cc_out operand. 3870 if (((isThumb() && Mnemonic == "add") || 3871 (isThumbTwo() && Mnemonic == "sub")) && 3872 Operands.size() == 6 && 3873 static_cast<ARMOperand*>(Operands[3])->isReg() && 3874 static_cast<ARMOperand*>(Operands[4])->isReg() && 3875 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 3876 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3877 (static_cast<ARMOperand*>(Operands[5])->isReg() || 3878 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 3879 return true; 3880 // For Thumb2, add/sub immediate does not have a cc_out operand for the 3881 // imm0_4095 variant. That's the least-preferred variant when 3882 // selecting via the generic "add" mnemonic, so to know that we 3883 // should remove the cc_out operand, we have to explicitly check that 3884 // it's not one of the other variants. Ugh. 3885 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 3886 Operands.size() == 6 && 3887 static_cast<ARMOperand*>(Operands[3])->isReg() && 3888 static_cast<ARMOperand*>(Operands[4])->isReg() && 3889 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3890 // Nest conditions rather than one big 'if' statement for readability. 3891 // 3892 // If either register is a high reg, it's either one of the SP 3893 // variants (handled above) or a 32-bit encoding, so we just 3894 // check against T3. 3895 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3896 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 3897 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 3898 return false; 3899 // If both registers are low, we're in an IT block, and the immediate is 3900 // in range, we should use encoding T1 instead, which has a cc_out. 3901 if (inITBlock() && 3902 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 3903 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 3904 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 3905 return false; 3906 3907 // Otherwise, we use encoding T4, which does not have a cc_out 3908 // operand. 3909 return true; 3910 } 3911 3912 // The thumb2 multiply instruction doesn't have a CCOut register, so 3913 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 3914 // use the 16-bit encoding or not. 3915 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 3916 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3917 static_cast<ARMOperand*>(Operands[3])->isReg() && 3918 static_cast<ARMOperand*>(Operands[4])->isReg() && 3919 static_cast<ARMOperand*>(Operands[5])->isReg() && 3920 // If the registers aren't low regs, the destination reg isn't the 3921 // same as one of the source regs, or the cc_out operand is zero 3922 // outside of an IT block, we have to use the 32-bit encoding, so 3923 // remove the cc_out operand. 
3924 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3925 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 3926 !inITBlock() || 3927 (static_cast<ARMOperand*>(Operands[3])->getReg() != 3928 static_cast<ARMOperand*>(Operands[5])->getReg() && 3929 static_cast<ARMOperand*>(Operands[3])->getReg() != 3930 static_cast<ARMOperand*>(Operands[4])->getReg()))) 3931 return true; 3932 3933 3934 3935 // Register-register 'add/sub' for thumb does not have a cc_out operand 3936 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 3937 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 3938 // right, this will result in better diagnostics (which operand is off) 3939 // anyway. 3940 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 3941 (Operands.size() == 5 || Operands.size() == 6) && 3942 static_cast<ARMOperand*>(Operands[3])->isReg() && 3943 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 3944 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3945 return true; 3946 3947 return false; 3948} 3949 3950/// Parse an arm instruction mnemonic followed by its operands. 3951bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, 3952 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3953 // Create the leading tokens for the mnemonic, split by '.' characters. 3954 size_t Start = 0, Next = Name.find('.'); 3955 StringRef Mnemonic = Name.slice(Start, Next); 3956 3957 // Split out the predication code and carry setting flag from the mnemonic. 3958 unsigned PredicationCode; 3959 unsigned ProcessorIMod; 3960 bool CarrySetting; 3961 StringRef ITMask; 3962 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 3963 ProcessorIMod, ITMask); 3964 3965 // In Thumb1, only the branch (B) instruction can be predicated. 3966 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 3967 Parser.EatToEndOfStatement(); 3968 return Error(NameLoc, "conditional execution not supported in Thumb1"); 3969 } 3970 3971 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 3972 3973 // Handle the IT instruction ITMask. Convert it to a bitmask. This 3974 // is the mask as it will be for the IT encoding if the conditional 3975 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 3976 // where the conditional bit0 is zero, the instruction post-processing 3977 // will adjust the mask accordingly. 3978 if (Mnemonic == "it") { 3979 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 3980 if (ITMask.size() > 3) { 3981 Parser.EatToEndOfStatement(); 3982 return Error(Loc, "too many conditions on IT instruction"); 3983 } 3984 unsigned Mask = 8; 3985 for (unsigned i = ITMask.size(); i != 0; --i) { 3986 char pos = ITMask[i - 1]; 3987 if (pos != 't' && pos != 'e') { 3988 Parser.EatToEndOfStatement(); 3989 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 3990 } 3991 Mask >>= 1; 3992 if (ITMask[i - 1] == 't') 3993 Mask |= 8; 3994 } 3995 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 3996 } 3997 3998 // FIXME: This is all a pretty gross hack. We should automatically handle 3999 // optional operands like this via tblgen. 4000 4001 // Next, add the CCOut and ConditionCode operands, if needed. 
4002 // 4003 // For mnemonics which can ever incorporate a carry setting bit or predication 4004 // code, our matching model involves us always generating CCOut and 4005 // ConditionCode operands to match the mnemonic "as written" and then we let 4006 // the matcher deal with finding the right instruction or generating an 4007 // appropriate error. 4008 bool CanAcceptCarrySet, CanAcceptPredicationCode; 4009 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode); 4010 4011 // If we had a carry-set on an instruction that can't do that, issue an 4012 // error. 4013 if (!CanAcceptCarrySet && CarrySetting) { 4014 Parser.EatToEndOfStatement(); 4015 return Error(NameLoc, "instruction '" + Mnemonic + 4016 "' can not set flags, but 's' suffix specified"); 4017 } 4018 // If we had a predication code on an instruction that can't do that, issue an 4019 // error. 4020 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 4021 Parser.EatToEndOfStatement(); 4022 return Error(NameLoc, "instruction '" + Mnemonic + 4023 "' is not predicable, but condition code specified"); 4024 } 4025 4026 // Add the carry setting operand, if necessary. 4027 if (CanAcceptCarrySet) { 4028 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 4029 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 4030 Loc)); 4031 } 4032 4033 // Add the predication code operand, if necessary. 4034 if (CanAcceptPredicationCode) { 4035 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 4036 CarrySetting); 4037 Operands.push_back(ARMOperand::CreateCondCode( 4038 ARMCC::CondCodes(PredicationCode), Loc)); 4039 } 4040 4041 // Add the processor imod operand, if necessary. 4042 if (ProcessorIMod) { 4043 Operands.push_back(ARMOperand::CreateImm( 4044 MCConstantExpr::Create(ProcessorIMod, getContext()), 4045 NameLoc, NameLoc)); 4046 } 4047 4048 // Add the remaining tokens in the mnemonic. 4049 while (Next != StringRef::npos) { 4050 Start = Next; 4051 Next = Name.find('.', Start + 1); 4052 StringRef ExtraToken = Name.slice(Start, Next); 4053 4054 // For now, we're only parsing Thumb1 (for the most part), so 4055 // just ignore ".n" qualifiers. We'll use them to restrict 4056 // matching when we do Thumb2. 4057 if (ExtraToken != ".n") { 4058 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 4059 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 4060 } 4061 } 4062 4063 // Read the remaining operands. 4064 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4065 // Read the first operand. 4066 if (parseOperand(Operands, Mnemonic)) { 4067 Parser.EatToEndOfStatement(); 4068 return true; 4069 } 4070 4071 while (getLexer().is(AsmToken::Comma)) { 4072 Parser.Lex(); // Eat the comma. 4073 4074 // Parse and remember the operand. 4075 if (parseOperand(Operands, Mnemonic)) { 4076 Parser.EatToEndOfStatement(); 4077 return true; 4078 } 4079 } 4080 } 4081 4082 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4083 SMLoc Loc = getLexer().getLoc(); 4084 Parser.EatToEndOfStatement(); 4085 return Error(Loc, "unexpected token in argument list"); 4086 } 4087 4088 Parser.Lex(); // Consume the EndOfStatement 4089 4090 // Some instructions, mostly Thumb, have forms for the same mnemonic that 4091 // do and don't have a cc_out optional-def operand. With some spot-checks 4092 // of the operand list, we can figure out which variant we're trying to 4093 // parse and adjust accordingly before actually matching. 
We shouldn't ever
4094   // try to remove a cc_out operand that was explicitly set on the
4095   // mnemonic, of course (CarrySetting == true). Reason #317 why the
4096   // table-driven matcher doesn't fit well with the ARM instruction set.
4097   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4098     ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4099     Operands.erase(Operands.begin() + 1);
4100     delete Op;
4101   }
4102 
4103   // ARM mode 'blx' needs special handling, as the register operand version
4104   // is predicable, but the label operand version is not. So, we can't rely
4105   // on the mnemonic-based checking to correctly figure out when to put
4106   // a k_CondCode operand in the list. If we're trying to match the label
4107   // version, remove the k_CondCode operand here.
4108   if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4109       static_cast<ARMOperand*>(Operands[2])->isImm()) {
4110     ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4111     Operands.erase(Operands.begin() + 1);
4112     delete Op;
4113   }
4114 
4115   // The vector-compare-to-zero instructions have a literal token "#0" at
4116   // the end that comes to here as an immediate operand. Convert it to a
4117   // token to play nicely with the matcher.
4118   if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4119        Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4120       static_cast<ARMOperand*>(Operands[5])->isImm()) {
4121     ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4122     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4123     if (CE && CE->getValue() == 0) {
4124       Operands.erase(Operands.begin() + 5);
4125       Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4126       delete Op;
4127     }
4128   }
4129   // VCMP{E} does the same thing, but with a different operand count.
4130   if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4131       static_cast<ARMOperand*>(Operands[4])->isImm()) {
4132     ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4133     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4134     if (CE && CE->getValue() == 0) {
4135       Operands.erase(Operands.begin() + 4);
4136       Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4137       delete Op;
4138     }
4139   }
4140   // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4141   // end. Convert it to a token here.
4142   if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4143       static_cast<ARMOperand*>(Operands[5])->isImm()) {
4144     ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4145     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4146     if (CE && CE->getValue() == 0) {
4147       Operands.erase(Operands.begin() + 5);
4148       Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4149       delete Op;
4150     }
4151   }
4152 
4153   return false;
4154 }
4155 
4156 // Validate context-sensitive operand constraints.
4157 
4158 // Return 'true' if the register list contains non-low GPR registers,
4159 // 'false' otherwise. If Reg is in the register list or is HiReg, set
4160 // 'containsReg' to true.
4161 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4162                                  unsigned HiReg, bool &containsReg) {
4163   containsReg = false;
4164   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4165     unsigned OpReg = Inst.getOperand(i).getReg();
4166     if (OpReg == Reg)
4167       containsReg = true;
4168     // Anything other than a low register isn't legal here.
4169 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4170 return true; 4171 } 4172 return false; 4173} 4174 4175// Check if the specified regisgter is in the register list of the inst, 4176// starting at the indicated operand number. 4177static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4178 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4179 unsigned OpReg = Inst.getOperand(i).getReg(); 4180 if (OpReg == Reg) 4181 return true; 4182 } 4183 return false; 4184} 4185 4186// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4187// the ARMInsts array) instead. Getting that here requires awkward 4188// API changes, though. Better way? 4189namespace llvm { 4190extern MCInstrDesc ARMInsts[]; 4191} 4192static MCInstrDesc &getInstDesc(unsigned Opcode) { 4193 return ARMInsts[Opcode]; 4194} 4195 4196// FIXME: We would really like to be able to tablegen'erate this. 4197bool ARMAsmParser:: 4198validateInstruction(MCInst &Inst, 4199 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4200 MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4201 SMLoc Loc = Operands[0]->getStartLoc(); 4202 // Check the IT block state first. 4203 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4204 // being allowed in IT blocks, but not being predicable. It just always 4205 // executes. 4206 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4207 unsigned bit = 1; 4208 if (ITState.FirstCond) 4209 ITState.FirstCond = false; 4210 else 4211 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4212 // The instruction must be predicable. 4213 if (!MCID.isPredicable()) 4214 return Error(Loc, "instructions in IT block must be predicable"); 4215 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4216 unsigned ITCond = bit ? ITState.Cond : 4217 ARMCC::getOppositeCondition(ITState.Cond); 4218 if (Cond != ITCond) { 4219 // Find the condition code Operand to get its SMLoc information. 4220 SMLoc CondLoc; 4221 for (unsigned i = 1; i < Operands.size(); ++i) 4222 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4223 CondLoc = Operands[i]->getStartLoc(); 4224 return Error(CondLoc, "incorrect condition in IT block; got '" + 4225 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4226 "', but expected '" + 4227 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4228 } 4229 // Check for non-'al' condition codes outside of the IT block. 4230 } else if (isThumbTwo() && MCID.isPredicable() && 4231 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4232 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4233 Inst.getOpcode() != ARM::t2B) 4234 return Error(Loc, "predicated instructions must be in IT block"); 4235 4236 switch (Inst.getOpcode()) { 4237 case ARM::LDRD: 4238 case ARM::LDRD_PRE: 4239 case ARM::LDRD_POST: 4240 case ARM::LDREXD: { 4241 // Rt2 must be Rt + 1. 4242 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4243 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4244 if (Rt2 != Rt + 1) 4245 return Error(Operands[3]->getStartLoc(), 4246 "destination operands must be sequential"); 4247 return false; 4248 } 4249 case ARM::STRD: { 4250 // Rt2 must be Rt + 1. 
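    // Illustrative example (hypothetical inputs): "strd r2, r3, [r0]" passes
    // this check, while "strd r2, r4, [r0]" is rejected below because r4 is
    // not r2 + 1.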
4251 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4252 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4253 if (Rt2 != Rt + 1) 4254 return Error(Operands[3]->getStartLoc(), 4255 "source operands must be sequential"); 4256 return false; 4257 } 4258 case ARM::STRD_PRE: 4259 case ARM::STRD_POST: 4260 case ARM::STREXD: { 4261 // Rt2 must be Rt + 1. 4262 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4263 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4264 if (Rt2 != Rt + 1) 4265 return Error(Operands[3]->getStartLoc(), 4266 "source operands must be sequential"); 4267 return false; 4268 } 4269 case ARM::SBFX: 4270 case ARM::UBFX: { 4271 // width must be in range [1, 32-lsb] 4272 unsigned lsb = Inst.getOperand(2).getImm(); 4273 unsigned widthm1 = Inst.getOperand(3).getImm(); 4274 if (widthm1 >= 32 - lsb) 4275 return Error(Operands[5]->getStartLoc(), 4276 "bitfield width must be in range [1,32-lsb]"); 4277 return false; 4278 } 4279 case ARM::tLDMIA: { 4280 // If we're parsing Thumb2, the .w variant is available and handles 4281 // most cases that are normally illegal for a Thumb1 LDM 4282 // instruction. We'll make the transformation in processInstruction() 4283 // if necessary. 4284 // 4285 // Thumb LDM instructions are writeback iff the base register is not 4286 // in the register list. 4287 unsigned Rn = Inst.getOperand(0).getReg(); 4288 bool hasWritebackToken = 4289 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4290 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4291 bool listContainsBase; 4292 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4293 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4294 "registers must be in range r0-r7"); 4295 // If we should have writeback, then there should be a '!' token. 4296 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4297 return Error(Operands[2]->getStartLoc(), 4298 "writeback operator '!' expected"); 4299 // If we should not have writeback, there must not be a '!'. This is 4300 // true even for the 32-bit wide encodings. 4301 if (listContainsBase && hasWritebackToken) 4302 return Error(Operands[3]->getStartLoc(), 4303 "writeback operator '!' not allowed when base register " 4304 "in register list"); 4305 4306 break; 4307 } 4308 case ARM::t2LDMIA_UPD: { 4309 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4310 return Error(Operands[4]->getStartLoc(), 4311 "writeback operator '!' 
not allowed when base register " 4312 "in register list"); 4313 break; 4314 } 4315 case ARM::tPOP: { 4316 bool listContainsBase; 4317 if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase)) 4318 return Error(Operands[2]->getStartLoc(), 4319 "registers must be in range r0-r7 or pc"); 4320 break; 4321 } 4322 case ARM::tPUSH: { 4323 bool listContainsBase; 4324 if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase)) 4325 return Error(Operands[2]->getStartLoc(), 4326 "registers must be in range r0-r7 or lr"); 4327 break; 4328 } 4329 case ARM::tSTMIA_UPD: { 4330 bool listContainsBase; 4331 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4332 return Error(Operands[4]->getStartLoc(), 4333 "registers must be in range r0-r7"); 4334 break; 4335 } 4336 } 4337 4338 return false; 4339} 4340 4341void ARMAsmParser:: 4342processInstruction(MCInst &Inst, 4343 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4344 switch (Inst.getOpcode()) { 4345 case ARM::LDMIA_UPD: 4346 // If this is a load of a single register via a 'pop', then we should use 4347 // a post-indexed LDR instruction instead, per the ARM ARM. 4348 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4349 Inst.getNumOperands() == 5) { 4350 MCInst TmpInst; 4351 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4352 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4353 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4354 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4355 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4356 TmpInst.addOperand(MCOperand::CreateImm(4)); 4357 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4358 TmpInst.addOperand(Inst.getOperand(3)); 4359 Inst = TmpInst; 4360 } 4361 break; 4362 case ARM::STMDB_UPD: 4363 // If this is a store of a single register via a 'push', then we should use 4364 // a pre-indexed STR instruction instead, per the ARM ARM. 4365 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4366 Inst.getNumOperands() == 5) { 4367 MCInst TmpInst; 4368 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4369 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4370 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4371 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4372 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4373 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4374 TmpInst.addOperand(Inst.getOperand(3)); 4375 Inst = TmpInst; 4376 } 4377 break; 4378 case ARM::tADDi8: 4379 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4380 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4381 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4382 // to encoding T1 if <Rd> is omitted." 4383 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4384 Inst.setOpcode(ARM::tADDi3); 4385 break; 4386 case ARM::tSUBi8: 4387 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4388 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4389 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4390 // to encoding T1 if <Rd> is omitted." 4391 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4392 Inst.setOpcode(ARM::tSUBi3); 4393 break; 4394 case ARM::tB: 4395 // A Thumb conditional branch outside of an IT block is a tBcc. 4396 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4397 Inst.setOpcode(ARM::tBcc); 4398 break; 4399 case ARM::t2B: 4400 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 
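    // Illustrative example (hypothetical input, assuming the matcher selected
    // t2B): a conditional "bne.w label" written outside an IT block carries a
    // NE predicate, so it is rewritten to t2Bcc here.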
4401 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4402 Inst.setOpcode(ARM::t2Bcc); 4403 break; 4404 case ARM::t2Bcc: 4405 // If the conditional is AL or we're in an IT block, we really want t2B. 4406 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) 4407 Inst.setOpcode(ARM::t2B); 4408 break; 4409 case ARM::tBcc: 4410 // If the conditional is AL, we really want tB. 4411 if (Inst.getOperand(1).getImm() == ARMCC::AL) 4412 Inst.setOpcode(ARM::tB); 4413 break; 4414 case ARM::tLDMIA: { 4415 // If the register list contains any high registers, or if the writeback 4416 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4417 // instead if we're in Thumb2. Otherwise, this should have generated 4418 // an error in validateInstruction(). 4419 unsigned Rn = Inst.getOperand(0).getReg(); 4420 bool hasWritebackToken = 4421 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4422 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4423 bool listContainsBase; 4424 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4425 (!listContainsBase && !hasWritebackToken) || 4426 (listContainsBase && hasWritebackToken)) { 4427 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4428 assert (isThumbTwo()); 4429 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4430 // If we're switching to the updating version, we need to insert 4431 // the writeback tied operand. 4432 if (hasWritebackToken) 4433 Inst.insert(Inst.begin(), 4434 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4435 } 4436 break; 4437 } 4438 case ARM::tSTMIA_UPD: { 4439 // If the register list contains any high registers, we need to use 4440 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4441 // should have generated an error in validateInstruction(). 4442 unsigned Rn = Inst.getOperand(0).getReg(); 4443 bool listContainsBase; 4444 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4445 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4446 assert (isThumbTwo()); 4447 Inst.setOpcode(ARM::t2STMIA_UPD); 4448 } 4449 break; 4450 } 4451 case ARM::t2MOVi: { 4452 // If we can use the 16-bit encoding and the user didn't explicitly 4453 // request the 32-bit variant, transform it here. 4454 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4455 Inst.getOperand(1).getImm() <= 255 && 4456 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4457 Inst.getOperand(4).getReg() == ARM::CPSR) || 4458 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4459 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4460 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4461 // The operands aren't in the same order for tMOVi8... 4462 MCInst TmpInst; 4463 TmpInst.setOpcode(ARM::tMOVi8); 4464 TmpInst.addOperand(Inst.getOperand(0)); 4465 TmpInst.addOperand(Inst.getOperand(4)); 4466 TmpInst.addOperand(Inst.getOperand(1)); 4467 TmpInst.addOperand(Inst.getOperand(2)); 4468 TmpInst.addOperand(Inst.getOperand(3)); 4469 Inst = TmpInst; 4470 } 4471 break; 4472 } 4473 case ARM::t2MOVr: { 4474 // If we can use the 16-bit encoding and the user didn't explicitly 4475 // request the 32-bit variant, transform it here. 
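    // Illustrative note (an assumption about the parsed operand layout): the
    // Operands[2] check below is how an explicit width qualifier such as
    // "mov.w r2, r3" is detected; the ".w" token keeps the instruction in its
    // 32-bit t2MOVr form instead of narrowing it to a 16-bit encoding.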
4476 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4477 isARMLowRegister(Inst.getOperand(1).getReg()) && 4478 Inst.getOperand(2).getImm() == ARMCC::AL && 4479 Inst.getOperand(4).getReg() == ARM::CPSR && 4480 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4481 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4482 // The operands aren't the same for tMOV[S]r... (no cc_out) 4483 MCInst TmpInst; 4484 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 4485 TmpInst.addOperand(Inst.getOperand(0)); 4486 TmpInst.addOperand(Inst.getOperand(1)); 4487 TmpInst.addOperand(Inst.getOperand(2)); 4488 TmpInst.addOperand(Inst.getOperand(3)); 4489 Inst = TmpInst; 4490 } 4491 break; 4492 } 4493 case ARM::t2SXTH: 4494 case ARM::t2SXTB: 4495 case ARM::t2UXTH: 4496 case ARM::t2UXTB: { 4497 // If we can use the 16-bit encoding and the user didn't explicitly 4498 // request the 32-bit variant, transform it here. 4499 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4500 isARMLowRegister(Inst.getOperand(1).getReg()) && 4501 Inst.getOperand(2).getImm() == 0 && 4502 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4503 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4504 unsigned NewOpc; 4505 switch (Inst.getOpcode()) { 4506 default: llvm_unreachable("Illegal opcode!"); 4507 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4508 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4509 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4510 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4511 } 4512 // The operands aren't the same for thumb1 (no rotate operand). 4513 MCInst TmpInst; 4514 TmpInst.setOpcode(NewOpc); 4515 TmpInst.addOperand(Inst.getOperand(0)); 4516 TmpInst.addOperand(Inst.getOperand(1)); 4517 TmpInst.addOperand(Inst.getOperand(3)); 4518 TmpInst.addOperand(Inst.getOperand(4)); 4519 Inst = TmpInst; 4520 } 4521 break; 4522 } 4523 case ARM::t2IT: { 4524 // The mask bits for all but the first condition are represented as 4525 // the low bit of the condition code value implies 't'. We currently 4526 // always have 1 implies 't', so XOR toggle the bits if the low bit 4527 // of the condition code is zero. The encoding also expects the low 4528 // bit of the condition to be encoded as bit 4 of the mask operand, 4529 // so mask that in if needed 4530 MCOperand &MO = Inst.getOperand(1); 4531 unsigned Mask = MO.getImm(); 4532 unsigned OrigMask = Mask; 4533 unsigned TZ = CountTrailingZeros_32(Mask); 4534 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4535 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4536 for (unsigned i = 3; i != TZ; --i) 4537 Mask ^= 1 << i; 4538 } else 4539 Mask |= 0x10; 4540 MO.setImm(Mask); 4541 4542 // Set up the IT block state according to the IT instruction we just 4543 // matched. 4544 assert(!inITBlock() && "nested IT blocks?!"); 4545 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4546 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4547 ITState.CurPosition = 0; 4548 ITState.FirstCond = true; 4549 break; 4550 } 4551 } 4552} 4553 4554unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4555 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4556 // suffix depending on whether they're in an IT block or not. 
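  // Illustrative examples (hypothetical Thumb2 inputs): for the 16-bit
  // flag-setting arithmetic forms, "add r0, r1, r2" outside an IT block is
  // reported as Match_RequiresITBlock (only the flag-setting "adds" form is
  // valid there), and "adds r0, r1, r2" inside an IT block is reported as
  // Match_RequiresNotITBlock.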
4557 unsigned Opc = Inst.getOpcode(); 4558 MCInstrDesc &MCID = getInstDesc(Opc); 4559 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4560 assert(MCID.hasOptionalDef() && 4561 "optionally flag setting instruction missing optional def operand"); 4562 assert(MCID.NumOperands == Inst.getNumOperands() && 4563 "operand count mismatch!"); 4564 // Find the optional-def operand (cc_out). 4565 unsigned OpNo; 4566 for (OpNo = 0; 4567 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4568 ++OpNo) 4569 ; 4570 // If we're parsing Thumb1, reject it completely. 4571 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4572 return Match_MnemonicFail; 4573 // If we're parsing Thumb2, which form is legal depends on whether we're 4574 // in an IT block. 4575 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4576 !inITBlock()) 4577 return Match_RequiresITBlock; 4578 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4579 inITBlock()) 4580 return Match_RequiresNotITBlock; 4581 } 4582 // Some high-register supporting Thumb1 encodings only allow both registers 4583 // to be from r0-r7 when in Thumb2. 4584 else if (Opc == ARM::tADDhirr && isThumbOne() && 4585 isARMLowRegister(Inst.getOperand(1).getReg()) && 4586 isARMLowRegister(Inst.getOperand(2).getReg())) 4587 return Match_RequiresThumb2; 4588 // Others only require ARMv6 or later. 4589 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4590 isARMLowRegister(Inst.getOperand(0).getReg()) && 4591 isARMLowRegister(Inst.getOperand(1).getReg())) 4592 return Match_RequiresV6; 4593 return Match_Success; 4594} 4595 4596bool ARMAsmParser:: 4597MatchAndEmitInstruction(SMLoc IDLoc, 4598 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4599 MCStreamer &Out) { 4600 MCInst Inst; 4601 unsigned ErrorInfo; 4602 unsigned MatchResult; 4603 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4604 switch (MatchResult) { 4605 default: break; 4606 case Match_Success: 4607 // Context sensitive operand constraints aren't handled by the matcher, 4608 // so check them here. 4609 if (validateInstruction(Inst, Operands)) { 4610 // Still progress the IT block, otherwise one wrong condition causes 4611 // nasty cascading errors. 4612 forwardITPosition(); 4613 return true; 4614 } 4615 4616 // Some instructions need post-processing to, for example, tweak which 4617 // encoding is selected. 4618 processInstruction(Inst, Operands); 4619 4620 // Only move forward at the very end so that everything in validate 4621 // and process gets a consistent answer about whether we're in an IT 4622 // block. 4623 forwardITPosition(); 4624 4625 Out.EmitInstruction(Inst); 4626 return false; 4627 case Match_MissingFeature: 4628 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 4629 return true; 4630 case Match_InvalidOperand: { 4631 SMLoc ErrorLoc = IDLoc; 4632 if (ErrorInfo != ~0U) { 4633 if (ErrorInfo >= Operands.size()) 4634 return Error(IDLoc, "too few operands for instruction"); 4635 4636 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 4637 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 4638 } 4639 4640 return Error(ErrorLoc, "invalid operand for instruction"); 4641 } 4642 case Match_MnemonicFail: 4643 return Error(IDLoc, "invalid instruction"); 4644 case Match_ConversionFail: 4645 // The converter function will have already emited a diagnostic. 
4646 return true; 4647 case Match_RequiresNotITBlock: 4648 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 4649 case Match_RequiresITBlock: 4650 return Error(IDLoc, "instruction only valid inside IT block"); 4651 case Match_RequiresV6: 4652 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 4653 case Match_RequiresThumb2: 4654 return Error(IDLoc, "instruction variant requires Thumb2"); 4655 } 4656 4657 llvm_unreachable("Implement any new match types added!"); 4658 return true; 4659} 4660 4661/// parseDirective parses the arm specific directives 4662bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 4663 StringRef IDVal = DirectiveID.getIdentifier(); 4664 if (IDVal == ".word") 4665 return parseDirectiveWord(4, DirectiveID.getLoc()); 4666 else if (IDVal == ".thumb") 4667 return parseDirectiveThumb(DirectiveID.getLoc()); 4668 else if (IDVal == ".thumb_func") 4669 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 4670 else if (IDVal == ".code") 4671 return parseDirectiveCode(DirectiveID.getLoc()); 4672 else if (IDVal == ".syntax") 4673 return parseDirectiveSyntax(DirectiveID.getLoc()); 4674 return true; 4675} 4676 4677/// parseDirectiveWord 4678/// ::= .word [ expression (, expression)* ] 4679bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 4680 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4681 for (;;) { 4682 const MCExpr *Value; 4683 if (getParser().ParseExpression(Value)) 4684 return true; 4685 4686 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 4687 4688 if (getLexer().is(AsmToken::EndOfStatement)) 4689 break; 4690 4691 // FIXME: Improve diagnostic. 4692 if (getLexer().isNot(AsmToken::Comma)) 4693 return Error(L, "unexpected token in directive"); 4694 Parser.Lex(); 4695 } 4696 } 4697 4698 Parser.Lex(); 4699 return false; 4700} 4701 4702/// parseDirectiveThumb 4703/// ::= .thumb 4704bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 4705 if (getLexer().isNot(AsmToken::EndOfStatement)) 4706 return Error(L, "unexpected token in directive"); 4707 Parser.Lex(); 4708 4709 // TODO: set thumb mode 4710 // TODO: tell the MC streamer the mode 4711 // getParser().getStreamer().Emit???(); 4712 return false; 4713} 4714 4715/// parseDirectiveThumbFunc 4716/// ::= .thumbfunc symbol_name 4717bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 4718 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 4719 bool isMachO = MAI.hasSubsectionsViaSymbols(); 4720 StringRef Name; 4721 4722 // Darwin asm has function name after .thumb_func direction 4723 // ELF doesn't 4724 if (isMachO) { 4725 const AsmToken &Tok = Parser.getTok(); 4726 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 4727 return Error(L, "unexpected token in .thumb_func directive"); 4728 Name = Tok.getString(); 4729 Parser.Lex(); // Consume the identifier token. 4730 } 4731 4732 if (getLexer().isNot(AsmToken::EndOfStatement)) 4733 return Error(L, "unexpected token in directive"); 4734 Parser.Lex(); 4735 4736 // FIXME: assuming function name will be the line following .thumb_func 4737 if (!isMachO) { 4738 Name = Parser.getTok().getString(); 4739 } 4740 4741 // Mark symbol as a thumb symbol. 
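  // Illustrative usage (hypothetical input): on Darwin the name follows the
  // directive, e.g. ".thumb_func _foo"; for ELF the directive stands alone
  // and the name is taken from the following line, e.g. ".thumb_func"
  // followed by "foo:".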
4742   MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
4743   getParser().getStreamer().EmitThumbFunc(Func);
4744   return false;
4745 }
4746 
4747 /// parseDirectiveSyntax
4748 ///  ::= .syntax unified | divided
4749 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
4750   const AsmToken &Tok = Parser.getTok();
4751   if (Tok.isNot(AsmToken::Identifier))
4752     return Error(L, "unexpected token in .syntax directive");
4753   StringRef Mode = Tok.getString();
4754   if (Mode == "unified" || Mode == "UNIFIED")
4755     Parser.Lex();
4756   else if (Mode == "divided" || Mode == "DIVIDED")
4757     return Error(L, "'.syntax divided' arm assembly not supported");
4758   else
4759     return Error(L, "unrecognized syntax mode in .syntax directive");
4760 
4761   if (getLexer().isNot(AsmToken::EndOfStatement))
4762     return Error(Parser.getTok().getLoc(), "unexpected token in directive");
4763   Parser.Lex();
4764 
4765   // TODO: tell the MC streamer the mode
4766   // getParser().getStreamer().Emit???();
4767   return false;
4768 }
4769 
4770 /// parseDirectiveCode
4771 ///  ::= .code 16 | 32
4772 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
4773   const AsmToken &Tok = Parser.getTok();
4774   if (Tok.isNot(AsmToken::Integer))
4775     return Error(L, "unexpected token in .code directive");
4776   int64_t Val = Parser.getTok().getIntVal();
4777   if (Val == 16)
4778     Parser.Lex();
4779   else if (Val == 32)
4780     Parser.Lex();
4781   else
4782     return Error(L, "invalid operand to .code directive");
4783 
4784   if (getLexer().isNot(AsmToken::EndOfStatement))
4785     return Error(Parser.getTok().getLoc(), "unexpected token in directive");
4786   Parser.Lex();
4787 
4788   if (Val == 16) {
4789     if (!isThumb())
4790       SwitchMode();
4791     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
4792   } else {
4793     if (isThumb())
4794       SwitchMode();
4795     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
4796   }
4797 
4798   return false;
4799 }
4800 
4801 extern "C" void LLVMInitializeARMAsmLexer();
4802 
4803 /// Force static initialization.
4804 extern "C" void LLVMInitializeARMAsmParser() {
4805   RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
4806   RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
4807   LLVMInitializeARMAsmLexer();
4808 }
4809 
4810 #define GET_REGISTER_MATCHER
4811 #define GET_MATCHER_IMPLEMENTATION
4812 #include "ARMGenAsmMatcher.inc"
4813 