ARMAsmParser.cpp revision 48b368bcd5fd6d1857de137230ac019b8530f1cd
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringSwitch.h" 34#include "llvm/ADT/Twine.h" 35 36using namespace llvm; 37 38namespace { 39 40class ARMOperand; 41 42class ARMAsmParser : public MCTargetAsmParser { 43 MCSubtargetInfo &STI; 44 MCAsmParser &Parser; 45 46 struct { 47 ARMCC::CondCodes Cond; // Condition for IT block. 48 unsigned Mask:4; // Condition mask for instructions. 49 // Starting at first 1 (from lsb). 50 // '1' condition as indicated in IT. 51 // '0' inverse of condition (else). 52 // Count of instructions in IT block is 53 // 4 - trailingzeroes(mask) 54 55 bool FirstCond; // Explicit flag for when we're parsing the 56 // First instruction in the IT block. It's 57 // implied in the mask, so needs special 58 // handling. 
59 60 unsigned CurPosition; // Current position in parsing of IT 61 // block. In range [0,3]. Initialized 62 // according to count of instructions in block. 63 // ~0U if no active IT block. 64 } ITState; 65 bool inITBlock() { return ITState.CurPosition != ~0U;} 66 void forwardITPosition() { 67 if (!inITBlock()) return; 68 // Move to the next instruction in the IT block, if there is one. If not, 69 // mark the block as done. 70 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 71 if (++ITState.CurPosition == 5 - TZ) 72 ITState.CurPosition = ~0U; // Done with the IT block after this. 73 } 74 75 76 MCAsmParser &getParser() const { return Parser; } 77 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 78 79 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 80 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 81 82 int tryParseRegister(); 83 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 84 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 85 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 86 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 87 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 88 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 89 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 90 unsigned &ShiftAmount); 91 bool parseDirectiveWord(unsigned Size, SMLoc L); 92 bool parseDirectiveThumb(SMLoc L); 93 bool parseDirectiveThumbFunc(SMLoc L); 94 bool parseDirectiveCode(SMLoc L); 95 bool parseDirectiveSyntax(SMLoc L); 96 97 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 98 bool &CarrySetting, unsigned &ProcessorIMod, 99 StringRef &ITMask); 100 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 101 bool &CanAcceptPredicationCode); 102 103 bool isThumb() const { 104 // FIXME: Can tablegen auto-generate this? 
105 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 106 } 107 bool isThumbOne() const { 108 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 109 } 110 bool isThumbTwo() const { 111 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 112 } 113 bool hasV6Ops() const { 114 return STI.getFeatureBits() & ARM::HasV6Ops; 115 } 116 bool hasV7Ops() const { 117 return STI.getFeatureBits() & ARM::HasV7Ops; 118 } 119 void SwitchMode() { 120 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 121 setAvailableFeatures(FB); 122 } 123 bool isMClass() const { 124 return STI.getFeatureBits() & ARM::FeatureMClass; 125 } 126 127 /// @name Auto-generated Match Functions 128 /// { 129 130#define GET_ASSEMBLER_HEADER 131#include "ARMGenAsmMatcher.inc" 132 133 /// } 134 135 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 136 OperandMatchResultTy parseCoprocNumOperand( 137 SmallVectorImpl<MCParsedAsmOperand*>&); 138 OperandMatchResultTy parseCoprocRegOperand( 139 SmallVectorImpl<MCParsedAsmOperand*>&); 140 OperandMatchResultTy parseCoprocOptionOperand( 141 SmallVectorImpl<MCParsedAsmOperand*>&); 142 OperandMatchResultTy parseMemBarrierOptOperand( 143 SmallVectorImpl<MCParsedAsmOperand*>&); 144 OperandMatchResultTy parseProcIFlagsOperand( 145 SmallVectorImpl<MCParsedAsmOperand*>&); 146 OperandMatchResultTy parseMSRMaskOperand( 147 SmallVectorImpl<MCParsedAsmOperand*>&); 148 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 149 StringRef Op, int Low, int High); 150 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 151 return parsePKHImm(O, "lsl", 0, 31); 152 } 153 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 154 return parsePKHImm(O, "asr", 1, 32); 155 } 156 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 157 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 158 
OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 159 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 160 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 161 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 162 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 163 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 164 165 // Asm Match Converter Methods 166 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 167 const SmallVectorImpl<MCParsedAsmOperand*> &); 168 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 169 const SmallVectorImpl<MCParsedAsmOperand*> &); 170 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 171 const SmallVectorImpl<MCParsedAsmOperand*> &); 172 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 173 const SmallVectorImpl<MCParsedAsmOperand*> &); 174 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 175 const SmallVectorImpl<MCParsedAsmOperand*> &); 176 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 177 const SmallVectorImpl<MCParsedAsmOperand*> &); 178 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 179 const SmallVectorImpl<MCParsedAsmOperand*> &); 180 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 181 const SmallVectorImpl<MCParsedAsmOperand*> &); 182 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 183 const SmallVectorImpl<MCParsedAsmOperand*> &); 184 bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 185 const SmallVectorImpl<MCParsedAsmOperand*> &); 186 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 187 const SmallVectorImpl<MCParsedAsmOperand*> &); 188 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 189 const SmallVectorImpl<MCParsedAsmOperand*> &); 190 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 191 const 
SmallVectorImpl<MCParsedAsmOperand*> &); 192 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 193 const SmallVectorImpl<MCParsedAsmOperand*> &); 194 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 195 const SmallVectorImpl<MCParsedAsmOperand*> &); 196 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 197 const SmallVectorImpl<MCParsedAsmOperand*> &); 198 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 199 const SmallVectorImpl<MCParsedAsmOperand*> &); 200 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 201 const SmallVectorImpl<MCParsedAsmOperand*> &); 202 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 203 const SmallVectorImpl<MCParsedAsmOperand*> &); 204 bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 205 const SmallVectorImpl<MCParsedAsmOperand*> &); 206 bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 207 const SmallVectorImpl<MCParsedAsmOperand*> &); 208 209 bool validateInstruction(MCInst &Inst, 210 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 211 bool processInstruction(MCInst &Inst, 212 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 213 bool shouldOmitCCOutOperand(StringRef Mnemonic, 214 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 215 216public: 217 enum ARMMatchResultTy { 218 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 219 Match_RequiresNotITBlock, 220 Match_RequiresV6, 221 Match_RequiresThumb2 222 }; 223 224 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 225 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 226 MCAsmParserExtension::Initialize(_Parser); 227 228 // Initialize the set of available features. 229 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 230 231 // Not in an ITBlock to start with. 
232 ITState.CurPosition = ~0U; 233 } 234 235 // Implementation of the MCTargetAsmParser interface: 236 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 237 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 238 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 239 bool ParseDirective(AsmToken DirectiveID); 240 241 unsigned checkTargetMatchPredicate(MCInst &Inst); 242 243 bool MatchAndEmitInstruction(SMLoc IDLoc, 244 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 245 MCStreamer &Out); 246}; 247} // end anonymous namespace 248 249namespace { 250 251/// ARMOperand - Instances of this class represent a parsed ARM machine 252/// instruction. 253class ARMOperand : public MCParsedAsmOperand { 254 enum KindTy { 255 k_CondCode, 256 k_CCOut, 257 k_ITCondMask, 258 k_CoprocNum, 259 k_CoprocReg, 260 k_CoprocOption, 261 k_Immediate, 262 k_FPImmediate, 263 k_MemBarrierOpt, 264 k_Memory, 265 k_PostIndexRegister, 266 k_MSRMask, 267 k_ProcIFlags, 268 k_VectorIndex, 269 k_Register, 270 k_RegisterList, 271 k_DPRRegisterList, 272 k_SPRRegisterList, 273 k_VectorList, 274 k_ShiftedRegister, 275 k_ShiftedImmediate, 276 k_ShifterImmediate, 277 k_RotateImmediate, 278 k_BitfieldDescriptor, 279 k_Token 280 } Kind; 281 282 SMLoc StartLoc, EndLoc; 283 SmallVector<unsigned, 8> Registers; 284 285 union { 286 struct { 287 ARMCC::CondCodes Val; 288 } CC; 289 290 struct { 291 unsigned Val; 292 } Cop; 293 294 struct { 295 unsigned Val; 296 } CoprocOption; 297 298 struct { 299 unsigned Mask:4; 300 } ITMask; 301 302 struct { 303 ARM_MB::MemBOpt Val; 304 } MBOpt; 305 306 struct { 307 ARM_PROC::IFlags Val; 308 } IFlags; 309 310 struct { 311 unsigned Val; 312 } MMask; 313 314 struct { 315 const char *Data; 316 unsigned Length; 317 } Tok; 318 319 struct { 320 unsigned RegNum; 321 } Reg; 322 323 // A vector register list is a sequential list of 1 to 4 registers. 
324 struct { 325 unsigned RegNum; 326 unsigned Count; 327 } VectorList; 328 329 struct { 330 unsigned Val; 331 } VectorIndex; 332 333 struct { 334 const MCExpr *Val; 335 } Imm; 336 337 struct { 338 unsigned Val; // encoded 8-bit representation 339 } FPImm; 340 341 /// Combined record for all forms of ARM address expressions. 342 struct { 343 unsigned BaseRegNum; 344 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 345 // was specified. 346 const MCConstantExpr *OffsetImm; // Offset immediate value 347 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 348 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 349 unsigned ShiftImm; // shift for OffsetReg. 350 unsigned Alignment; // 0 = no alignment specified 351 // n = alignment in bytes (8, 16, or 32) 352 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 353 } Memory; 354 355 struct { 356 unsigned RegNum; 357 bool isAdd; 358 ARM_AM::ShiftOpc ShiftTy; 359 unsigned ShiftImm; 360 } PostIdxReg; 361 362 struct { 363 bool isASR; 364 unsigned Imm; 365 } ShifterImm; 366 struct { 367 ARM_AM::ShiftOpc ShiftTy; 368 unsigned SrcReg; 369 unsigned ShiftReg; 370 unsigned ShiftImm; 371 } RegShiftedReg; 372 struct { 373 ARM_AM::ShiftOpc ShiftTy; 374 unsigned SrcReg; 375 unsigned ShiftImm; 376 } RegShiftedImm; 377 struct { 378 unsigned Imm; 379 } RotImm; 380 struct { 381 unsigned LSB; 382 unsigned Width; 383 } Bitfield; 384 }; 385 386 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 387public: 388 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 389 Kind = o.Kind; 390 StartLoc = o.StartLoc; 391 EndLoc = o.EndLoc; 392 switch (Kind) { 393 case k_CondCode: 394 CC = o.CC; 395 break; 396 case k_ITCondMask: 397 ITMask = o.ITMask; 398 break; 399 case k_Token: 400 Tok = o.Tok; 401 break; 402 case k_CCOut: 403 case k_Register: 404 Reg = o.Reg; 405 break; 406 case k_RegisterList: 407 case k_DPRRegisterList: 408 case k_SPRRegisterList: 409 Registers = o.Registers; 410 break; 411 
case k_VectorList: 412 VectorList = o.VectorList; 413 break; 414 case k_CoprocNum: 415 case k_CoprocReg: 416 Cop = o.Cop; 417 break; 418 case k_CoprocOption: 419 CoprocOption = o.CoprocOption; 420 break; 421 case k_Immediate: 422 Imm = o.Imm; 423 break; 424 case k_FPImmediate: 425 FPImm = o.FPImm; 426 break; 427 case k_MemBarrierOpt: 428 MBOpt = o.MBOpt; 429 break; 430 case k_Memory: 431 Memory = o.Memory; 432 break; 433 case k_PostIndexRegister: 434 PostIdxReg = o.PostIdxReg; 435 break; 436 case k_MSRMask: 437 MMask = o.MMask; 438 break; 439 case k_ProcIFlags: 440 IFlags = o.IFlags; 441 break; 442 case k_ShifterImmediate: 443 ShifterImm = o.ShifterImm; 444 break; 445 case k_ShiftedRegister: 446 RegShiftedReg = o.RegShiftedReg; 447 break; 448 case k_ShiftedImmediate: 449 RegShiftedImm = o.RegShiftedImm; 450 break; 451 case k_RotateImmediate: 452 RotImm = o.RotImm; 453 break; 454 case k_BitfieldDescriptor: 455 Bitfield = o.Bitfield; 456 break; 457 case k_VectorIndex: 458 VectorIndex = o.VectorIndex; 459 break; 460 } 461 } 462 463 /// getStartLoc - Get the location of the first token of this operand. 464 SMLoc getStartLoc() const { return StartLoc; } 465 /// getEndLoc - Get the location of the last token of this operand. 
466 SMLoc getEndLoc() const { return EndLoc; } 467 468 ARMCC::CondCodes getCondCode() const { 469 assert(Kind == k_CondCode && "Invalid access!"); 470 return CC.Val; 471 } 472 473 unsigned getCoproc() const { 474 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 475 return Cop.Val; 476 } 477 478 StringRef getToken() const { 479 assert(Kind == k_Token && "Invalid access!"); 480 return StringRef(Tok.Data, Tok.Length); 481 } 482 483 unsigned getReg() const { 484 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 485 return Reg.RegNum; 486 } 487 488 const SmallVectorImpl<unsigned> &getRegList() const { 489 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 490 Kind == k_SPRRegisterList) && "Invalid access!"); 491 return Registers; 492 } 493 494 const MCExpr *getImm() const { 495 assert(Kind == k_Immediate && "Invalid access!"); 496 return Imm.Val; 497 } 498 499 unsigned getFPImm() const { 500 assert(Kind == k_FPImmediate && "Invalid access!"); 501 return FPImm.Val; 502 } 503 504 unsigned getVectorIndex() const { 505 assert(Kind == k_VectorIndex && "Invalid access!"); 506 return VectorIndex.Val; 507 } 508 509 ARM_MB::MemBOpt getMemBarrierOpt() const { 510 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 511 return MBOpt.Val; 512 } 513 514 ARM_PROC::IFlags getProcIFlags() const { 515 assert(Kind == k_ProcIFlags && "Invalid access!"); 516 return IFlags.Val; 517 } 518 519 unsigned getMSRMask() const { 520 assert(Kind == k_MSRMask && "Invalid access!"); 521 return MMask.Val; 522 } 523 524 bool isCoprocNum() const { return Kind == k_CoprocNum; } 525 bool isCoprocReg() const { return Kind == k_CoprocReg; } 526 bool isCoprocOption() const { return Kind == k_CoprocOption; } 527 bool isCondCode() const { return Kind == k_CondCode; } 528 bool isCCOut() const { return Kind == k_CCOut; } 529 bool isITMask() const { return Kind == k_ITCondMask; } 530 bool isITCondCode() const { return Kind == k_CondCode; } 531 bool isImm() 
const { return Kind == k_Immediate; } 532 bool isFPImm() const { return Kind == k_FPImmediate; } 533 bool isImm8s4() const { 534 if (Kind != k_Immediate) 535 return false; 536 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 537 if (!CE) return false; 538 int64_t Value = CE->getValue(); 539 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 540 } 541 bool isImm0_1020s4() const { 542 if (Kind != k_Immediate) 543 return false; 544 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 545 if (!CE) return false; 546 int64_t Value = CE->getValue(); 547 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 548 } 549 bool isImm0_508s4() const { 550 if (Kind != k_Immediate) 551 return false; 552 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 553 if (!CE) return false; 554 int64_t Value = CE->getValue(); 555 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 556 } 557 bool isImm0_255() const { 558 if (Kind != k_Immediate) 559 return false; 560 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 561 if (!CE) return false; 562 int64_t Value = CE->getValue(); 563 return Value >= 0 && Value < 256; 564 } 565 bool isImm0_7() const { 566 if (Kind != k_Immediate) 567 return false; 568 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 569 if (!CE) return false; 570 int64_t Value = CE->getValue(); 571 return Value >= 0 && Value < 8; 572 } 573 bool isImm0_15() const { 574 if (Kind != k_Immediate) 575 return false; 576 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 577 if (!CE) return false; 578 int64_t Value = CE->getValue(); 579 return Value >= 0 && Value < 16; 580 } 581 bool isImm0_31() const { 582 if (Kind != k_Immediate) 583 return false; 584 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 585 if (!CE) return false; 586 int64_t Value = CE->getValue(); 587 return Value >= 0 && Value < 32; 588 } 589 bool isImm1_16() const { 590 if (Kind != k_Immediate) 591 return false; 
592 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 593 if (!CE) return false; 594 int64_t Value = CE->getValue(); 595 return Value > 0 && Value < 17; 596 } 597 bool isImm1_32() const { 598 if (Kind != k_Immediate) 599 return false; 600 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 601 if (!CE) return false; 602 int64_t Value = CE->getValue(); 603 return Value > 0 && Value < 33; 604 } 605 bool isImm0_32() const { 606 if (Kind != k_Immediate) 607 return false; 608 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 609 if (!CE) return false; 610 int64_t Value = CE->getValue(); 611 return Value >= 0 && Value < 33; 612 } 613 bool isImm0_65535() const { 614 if (Kind != k_Immediate) 615 return false; 616 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 617 if (!CE) return false; 618 int64_t Value = CE->getValue(); 619 return Value >= 0 && Value < 65536; 620 } 621 bool isImm0_65535Expr() const { 622 if (Kind != k_Immediate) 623 return false; 624 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 625 // If it's not a constant expression, it'll generate a fixup and be 626 // handled later. 
627 if (!CE) return true; 628 int64_t Value = CE->getValue(); 629 return Value >= 0 && Value < 65536; 630 } 631 bool isImm24bit() const { 632 if (Kind != k_Immediate) 633 return false; 634 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 635 if (!CE) return false; 636 int64_t Value = CE->getValue(); 637 return Value >= 0 && Value <= 0xffffff; 638 } 639 bool isImmThumbSR() const { 640 if (Kind != k_Immediate) 641 return false; 642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 643 if (!CE) return false; 644 int64_t Value = CE->getValue(); 645 return Value > 0 && Value < 33; 646 } 647 bool isPKHLSLImm() const { 648 if (Kind != k_Immediate) 649 return false; 650 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 651 if (!CE) return false; 652 int64_t Value = CE->getValue(); 653 return Value >= 0 && Value < 32; 654 } 655 bool isPKHASRImm() const { 656 if (Kind != k_Immediate) 657 return false; 658 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 659 if (!CE) return false; 660 int64_t Value = CE->getValue(); 661 return Value > 0 && Value <= 32; 662 } 663 bool isARMSOImm() const { 664 if (Kind != k_Immediate) 665 return false; 666 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 667 if (!CE) return false; 668 int64_t Value = CE->getValue(); 669 return ARM_AM::getSOImmVal(Value) != -1; 670 } 671 bool isARMSOImmNot() const { 672 if (Kind != k_Immediate) 673 return false; 674 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 675 if (!CE) return false; 676 int64_t Value = CE->getValue(); 677 return ARM_AM::getSOImmVal(~Value) != -1; 678 } 679 bool isT2SOImm() const { 680 if (Kind != k_Immediate) 681 return false; 682 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 683 if (!CE) return false; 684 int64_t Value = CE->getValue(); 685 return ARM_AM::getT2SOImmVal(Value) != -1; 686 } 687 bool isT2SOImmNot() const { 688 if (Kind != k_Immediate) 689 return false; 690 const 
MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 691 if (!CE) return false; 692 int64_t Value = CE->getValue(); 693 return ARM_AM::getT2SOImmVal(~Value) != -1; 694 } 695 bool isSetEndImm() const { 696 if (Kind != k_Immediate) 697 return false; 698 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 699 if (!CE) return false; 700 int64_t Value = CE->getValue(); 701 return Value == 1 || Value == 0; 702 } 703 bool isReg() const { return Kind == k_Register; } 704 bool isRegList() const { return Kind == k_RegisterList; } 705 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 706 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 707 bool isToken() const { return Kind == k_Token; } 708 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 709 bool isMemory() const { return Kind == k_Memory; } 710 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 711 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 712 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 713 bool isRotImm() const { return Kind == k_RotateImmediate; } 714 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 715 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 716 bool isPostIdxReg() const { 717 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 718 } 719 bool isMemNoOffset(bool alignOK = false) const { 720 if (!isMemory()) 721 return false; 722 // No offset of any kind. 723 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 724 (alignOK || Memory.Alignment == 0); 725 } 726 bool isAlignedMemory() const { 727 return isMemNoOffset(true); 728 } 729 bool isAddrMode2() const { 730 if (!isMemory() || Memory.Alignment != 0) return false; 731 // Check for register offset. 732 if (Memory.OffsetRegNum) return true; 733 // Immediate offset in range [-4095, 4095]. 
734 if (!Memory.OffsetImm) return true; 735 int64_t Val = Memory.OffsetImm->getValue(); 736 return Val > -4096 && Val < 4096; 737 } 738 bool isAM2OffsetImm() const { 739 if (Kind != k_Immediate) 740 return false; 741 // Immediate offset in range [-4095, 4095]. 742 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 743 if (!CE) return false; 744 int64_t Val = CE->getValue(); 745 return Val > -4096 && Val < 4096; 746 } 747 bool isAddrMode3() const { 748 if (!isMemory() || Memory.Alignment != 0) return false; 749 // No shifts are legal for AM3. 750 if (Memory.ShiftType != ARM_AM::no_shift) return false; 751 // Check for register offset. 752 if (Memory.OffsetRegNum) return true; 753 // Immediate offset in range [-255, 255]. 754 if (!Memory.OffsetImm) return true; 755 int64_t Val = Memory.OffsetImm->getValue(); 756 return Val > -256 && Val < 256; 757 } 758 bool isAM3Offset() const { 759 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 760 return false; 761 if (Kind == k_PostIndexRegister) 762 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 763 // Immediate offset in range [-255, 255]. 764 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 765 if (!CE) return false; 766 int64_t Val = CE->getValue(); 767 // Special case, #-0 is INT32_MIN. 768 return (Val > -256 && Val < 256) || Val == INT32_MIN; 769 } 770 bool isAddrMode5() const { 771 // If we have an immediate that's not a constant, treat it as a label 772 // reference needing a fixup. If it is a constant, it's something else 773 // and we reject it. 774 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 775 return true; 776 if (!isMemory() || Memory.Alignment != 0) return false; 777 // Check for register offset. 778 if (Memory.OffsetRegNum) return false; 779 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
780 if (!Memory.OffsetImm) return true; 781 int64_t Val = Memory.OffsetImm->getValue(); 782 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 783 Val == INT32_MIN; 784 } 785 bool isMemTBB() const { 786 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 787 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 788 return false; 789 return true; 790 } 791 bool isMemTBH() const { 792 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 793 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 794 Memory.Alignment != 0 ) 795 return false; 796 return true; 797 } 798 bool isMemRegOffset() const { 799 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 800 return false; 801 return true; 802 } 803 bool isT2MemRegOffset() const { 804 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 805 Memory.Alignment != 0) 806 return false; 807 // Only lsl #{0, 1, 2, 3} allowed. 808 if (Memory.ShiftType == ARM_AM::no_shift) 809 return true; 810 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 811 return false; 812 return true; 813 } 814 bool isMemThumbRR() const { 815 // Thumb reg+reg addressing is simple. Just two registers, a base and 816 // an offset. No shifts, negations or any other complicating factors. 817 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 818 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 819 return false; 820 return isARMLowRegister(Memory.BaseRegNum) && 821 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 822 } 823 bool isMemThumbRIs4() const { 824 if (!isMemory() || Memory.OffsetRegNum != 0 || 825 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 826 return false; 827 // Immediate offset, multiple of 4 in range [0, 124]. 
828 if (!Memory.OffsetImm) return true; 829 int64_t Val = Memory.OffsetImm->getValue(); 830 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 831 } 832 bool isMemThumbRIs2() const { 833 if (!isMemory() || Memory.OffsetRegNum != 0 || 834 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 835 return false; 836 // Immediate offset, multiple of 4 in range [0, 62]. 837 if (!Memory.OffsetImm) return true; 838 int64_t Val = Memory.OffsetImm->getValue(); 839 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 840 } 841 bool isMemThumbRIs1() const { 842 if (!isMemory() || Memory.OffsetRegNum != 0 || 843 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 844 return false; 845 // Immediate offset in range [0, 31]. 846 if (!Memory.OffsetImm) return true; 847 int64_t Val = Memory.OffsetImm->getValue(); 848 return Val >= 0 && Val <= 31; 849 } 850 bool isMemThumbSPI() const { 851 if (!isMemory() || Memory.OffsetRegNum != 0 || 852 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 853 return false; 854 // Immediate offset, multiple of 4 in range [0, 1020]. 855 if (!Memory.OffsetImm) return true; 856 int64_t Val = Memory.OffsetImm->getValue(); 857 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 858 } 859 bool isMemImm8s4Offset() const { 860 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 861 return false; 862 // Immediate offset a multiple of 4 in range [-1020, 1020]. 863 if (!Memory.OffsetImm) return true; 864 int64_t Val = Memory.OffsetImm->getValue(); 865 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 866 } 867 bool isMemImm0_1020s4Offset() const { 868 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 869 return false; 870 // Immediate offset a multiple of 4 in range [0, 1020]. 
871 if (!Memory.OffsetImm) return true; 872 int64_t Val = Memory.OffsetImm->getValue(); 873 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 874 } 875 bool isMemImm8Offset() const { 876 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 877 return false; 878 // Immediate offset in range [-255, 255]. 879 if (!Memory.OffsetImm) return true; 880 int64_t Val = Memory.OffsetImm->getValue(); 881 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 882 } 883 bool isMemPosImm8Offset() const { 884 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 885 return false; 886 // Immediate offset in range [0, 255]. 887 if (!Memory.OffsetImm) return true; 888 int64_t Val = Memory.OffsetImm->getValue(); 889 return Val >= 0 && Val < 256; 890 } 891 bool isMemNegImm8Offset() const { 892 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 893 return false; 894 // Immediate offset in range [-255, -1]. 895 if (!Memory.OffsetImm) return true; 896 int64_t Val = Memory.OffsetImm->getValue(); 897 return Val > -256 && Val < 0; 898 } 899 bool isMemUImm12Offset() const { 900 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 901 return false; 902 // Immediate offset in range [0, 4095]. 903 if (!Memory.OffsetImm) return true; 904 int64_t Val = Memory.OffsetImm->getValue(); 905 return (Val >= 0 && Val < 4096); 906 } 907 bool isMemImm12Offset() const { 908 // If we have an immediate that's not a constant, treat it as a label 909 // reference needing a fixup. If it is a constant, it's something else 910 // and we reject it. 911 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 912 return true; 913 914 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 915 return false; 916 // Immediate offset in range [-4095, 4095]. 
917 if (!Memory.OffsetImm) return true; 918 int64_t Val = Memory.OffsetImm->getValue(); 919 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 920 } 921 bool isPostIdxImm8() const { 922 if (Kind != k_Immediate) 923 return false; 924 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 925 if (!CE) return false; 926 int64_t Val = CE->getValue(); 927 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 928 } 929 bool isPostIdxImm8s4() const { 930 if (Kind != k_Immediate) 931 return false; 932 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 933 if (!CE) return false; 934 int64_t Val = CE->getValue(); 935 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 936 (Val == INT32_MIN); 937 } 938 939 bool isMSRMask() const { return Kind == k_MSRMask; } 940 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 941 942 // NEON operands. 943 bool isVecListOneD() const { 944 if (Kind != k_VectorList) return false; 945 return VectorList.Count == 1; 946 } 947 948 bool isVecListTwoD() const { 949 if (Kind != k_VectorList) return false; 950 return VectorList.Count == 2; 951 } 952 953 bool isVecListThreeD() const { 954 if (Kind != k_VectorList) return false; 955 return VectorList.Count == 3; 956 } 957 958 bool isVecListFourD() const { 959 if (Kind != k_VectorList) return false; 960 return VectorList.Count == 4; 961 } 962 963 bool isVecListTwoQ() const { 964 if (Kind != k_VectorList) return false; 965 //FIXME: We haven't taught the parser to handle by-two register lists 966 // yet, so don't pretend to know one. 
967 return VectorList.Count == 2 && false; 968 } 969 970 bool isVectorIndex8() const { 971 if (Kind != k_VectorIndex) return false; 972 return VectorIndex.Val < 8; 973 } 974 bool isVectorIndex16() const { 975 if (Kind != k_VectorIndex) return false; 976 return VectorIndex.Val < 4; 977 } 978 bool isVectorIndex32() const { 979 if (Kind != k_VectorIndex) return false; 980 return VectorIndex.Val < 2; 981 } 982 983 bool isNEONi8splat() const { 984 if (Kind != k_Immediate) 985 return false; 986 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 987 // Must be a constant. 988 if (!CE) return false; 989 int64_t Value = CE->getValue(); 990 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 991 // value. 992 return Value >= 0 && Value < 256; 993 } 994 995 bool isNEONi16splat() const { 996 if (Kind != k_Immediate) 997 return false; 998 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 999 // Must be a constant. 1000 if (!CE) return false; 1001 int64_t Value = CE->getValue(); 1002 // i16 value in the range [0,255] or [0x0100, 0xff00] 1003 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1004 } 1005 1006 bool isNEONi32splat() const { 1007 if (Kind != k_Immediate) 1008 return false; 1009 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1010 // Must be a constant. 1011 if (!CE) return false; 1012 int64_t Value = CE->getValue(); 1013 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 1014 return (Value >= 0 && Value < 256) || 1015 (Value >= 0x0100 && Value <= 0xff00) || 1016 (Value >= 0x010000 && Value <= 0xff0000) || 1017 (Value >= 0x01000000 && Value <= 0xff000000); 1018 } 1019 1020 bool isNEONi32vmov() const { 1021 if (Kind != k_Immediate) 1022 return false; 1023 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1024 // Must be a constant. 
1025 if (!CE) return false; 1026 int64_t Value = CE->getValue(); 1027 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1028 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1029 return (Value >= 0 && Value < 256) || 1030 (Value >= 0x0100 && Value <= 0xff00) || 1031 (Value >= 0x010000 && Value <= 0xff0000) || 1032 (Value >= 0x01000000 && Value <= 0xff000000) || 1033 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1034 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1035 } 1036 1037 bool isNEONi64splat() const { 1038 if (Kind != k_Immediate) 1039 return false; 1040 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1041 // Must be a constant. 1042 if (!CE) return false; 1043 uint64_t Value = CE->getValue(); 1044 // i64 value with each byte being either 0 or 0xff. 1045 for (unsigned i = 0; i < 8; ++i) 1046 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1047 return true; 1048 } 1049 1050 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1051 // Add as immediates when possible. Null MCExpr = 0. 1052 if (Expr == 0) 1053 Inst.addOperand(MCOperand::CreateImm(0)); 1054 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1055 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1056 else 1057 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1058 } 1059 1060 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1061 assert(N == 2 && "Invalid number of operands!"); 1062 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1063 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 1064 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1065 } 1066 1067 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1068 assert(N == 1 && "Invalid number of operands!"); 1069 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1070 } 1071 1072 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1073 assert(N == 1 && "Invalid number of operands!"); 1074 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1075 } 1076 1077 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1078 assert(N == 1 && "Invalid number of operands!"); 1079 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1080 } 1081 1082 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1083 assert(N == 1 && "Invalid number of operands!"); 1084 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1085 } 1086 1087 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1088 assert(N == 1 && "Invalid number of operands!"); 1089 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1090 } 1091 1092 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1093 assert(N == 1 && "Invalid number of operands!"); 1094 Inst.addOperand(MCOperand::CreateReg(getReg())); 1095 } 1096 1097 void addRegOperands(MCInst &Inst, unsigned N) const { 1098 assert(N == 1 && "Invalid number of operands!"); 1099 Inst.addOperand(MCOperand::CreateReg(getReg())); 1100 } 1101 1102 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1103 assert(N == 3 && "Invalid number of operands!"); 1104 assert(isRegShiftedReg() && 1105 "addRegShiftedRegOperands() on non RegShiftedReg!"); 1106 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1107 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1108 Inst.addOperand(MCOperand::CreateImm( 1109 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1110 } 1111 1112 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1113 assert(N == 2 && "Invalid number of operands!"); 
1114 assert(isRegShiftedImm() && 1115 "addRegShiftedImmOperands() on non RegShiftedImm!"); 1116 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1117 Inst.addOperand(MCOperand::CreateImm( 1118 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1119 } 1120 1121 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1122 assert(N == 1 && "Invalid number of operands!"); 1123 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1124 ShifterImm.Imm)); 1125 } 1126 1127 void addRegListOperands(MCInst &Inst, unsigned N) const { 1128 assert(N == 1 && "Invalid number of operands!"); 1129 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1130 for (SmallVectorImpl<unsigned>::const_iterator 1131 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1132 Inst.addOperand(MCOperand::CreateReg(*I)); 1133 } 1134 1135 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1136 addRegListOperands(Inst, N); 1137 } 1138 1139 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1140 addRegListOperands(Inst, N); 1141 } 1142 1143 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1144 assert(N == 1 && "Invalid number of operands!"); 1145 // Encoded as val>>3. The printer handles display as 8, 16, 24. 1146 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1147 } 1148 1149 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1150 assert(N == 1 && "Invalid number of operands!"); 1151 // Munge the lsb/width into a bitfield mask. 1152 unsigned lsb = Bitfield.LSB; 1153 unsigned width = Bitfield.Width; 1154 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 
1155 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1156 (32 - (lsb + width))); 1157 Inst.addOperand(MCOperand::CreateImm(Mask)); 1158 } 1159 1160 void addImmOperands(MCInst &Inst, unsigned N) const { 1161 assert(N == 1 && "Invalid number of operands!"); 1162 addExpr(Inst, getImm()); 1163 } 1164 1165 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1166 assert(N == 1 && "Invalid number of operands!"); 1167 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1168 } 1169 1170 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1171 assert(N == 1 && "Invalid number of operands!"); 1172 // FIXME: We really want to scale the value here, but the LDRD/STRD 1173 // instruction don't encode operands that way yet. 1174 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1175 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1176 } 1177 1178 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1179 assert(N == 1 && "Invalid number of operands!"); 1180 // The immediate is scaled by four in the encoding and is stored 1181 // in the MCInst as such. Lop off the low two bits here. 1182 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1183 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1184 } 1185 1186 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1187 assert(N == 1 && "Invalid number of operands!"); 1188 // The immediate is scaled by four in the encoding and is stored 1189 // in the MCInst as such. Lop off the low two bits here. 1190 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1191 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1192 } 1193 1194 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1195 assert(N == 1 && "Invalid number of operands!"); 1196 // The constant encodes as the immediate-1, and we store in the instruction 1197 // the bits as encoded, so subtract off one here. 
1198 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1199 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1200 } 1201 1202 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1203 assert(N == 1 && "Invalid number of operands!"); 1204 // The constant encodes as the immediate-1, and we store in the instruction 1205 // the bits as encoded, so subtract off one here. 1206 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1207 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1208 } 1209 1210 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1211 assert(N == 1 && "Invalid number of operands!"); 1212 // The constant encodes as the immediate, except for 32, which encodes as 1213 // zero. 1214 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1215 unsigned Imm = CE->getValue(); 1216 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1217 } 1218 1219 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1220 assert(N == 1 && "Invalid number of operands!"); 1221 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1222 // the instruction as well. 1223 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1224 int Val = CE->getValue(); 1225 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 1226 } 1227 1228 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 1229 assert(N == 1 && "Invalid number of operands!"); 1230 // The operand is actually a t2_so_imm, but we have its bitwise 1231 // negation in the assembly source, so twiddle it here. 1232 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1233 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1234 } 1235 1236 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 1237 assert(N == 1 && "Invalid number of operands!"); 1238 // The operand is actually a so_imm, but we have its bitwise 1239 // negation in the assembly source, so twiddle it here. 
1240 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1241 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1242 } 1243 1244 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1245 assert(N == 1 && "Invalid number of operands!"); 1246 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1247 } 1248 1249 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1250 assert(N == 1 && "Invalid number of operands!"); 1251 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1252 } 1253 1254 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1255 assert(N == 2 && "Invalid number of operands!"); 1256 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1257 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1258 } 1259 1260 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1261 assert(N == 3 && "Invalid number of operands!"); 1262 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1263 if (!Memory.OffsetRegNum) { 1264 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1265 // Special case for #-0 1266 if (Val == INT32_MIN) Val = 0; 1267 if (Val < 0) Val = -Val; 1268 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1269 } else { 1270 // For register offset, we encode the shift type and negation flag 1271 // here. 1272 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1273 Memory.ShiftImm, Memory.ShiftType); 1274 } 1275 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1276 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1277 Inst.addOperand(MCOperand::CreateImm(Val)); 1278 } 1279 1280 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1281 assert(N == 2 && "Invalid number of operands!"); 1282 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1283 assert(CE && "non-constant AM2OffsetImm operand!"); 1284 int32_t Val = CE->getValue(); 1285 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1286 // Special case for #-0 1287 if (Val == INT32_MIN) Val = 0; 1288 if (Val < 0) Val = -Val; 1289 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1290 Inst.addOperand(MCOperand::CreateReg(0)); 1291 Inst.addOperand(MCOperand::CreateImm(Val)); 1292 } 1293 1294 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1295 assert(N == 3 && "Invalid number of operands!"); 1296 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1297 if (!Memory.OffsetRegNum) { 1298 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1299 // Special case for #-0 1300 if (Val == INT32_MIN) Val = 0; 1301 if (Val < 0) Val = -Val; 1302 Val = ARM_AM::getAM3Opc(AddSub, Val); 1303 } else { 1304 // For register offset, we encode the shift type and negation flag 1305 // here. 1306 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1307 } 1308 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1309 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1310 Inst.addOperand(MCOperand::CreateImm(Val)); 1311 } 1312 1313 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1314 assert(N == 2 && "Invalid number of operands!"); 1315 if (Kind == k_PostIndexRegister) { 1316 int32_t Val = 1317 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 1318 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1319 Inst.addOperand(MCOperand::CreateImm(Val)); 1320 return; 1321 } 1322 1323 // Constant offset. 1324 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1325 int32_t Val = CE->getValue(); 1326 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1327 // Special case for #-0 1328 if (Val == INT32_MIN) Val = 0; 1329 if (Val < 0) Val = -Val; 1330 Val = ARM_AM::getAM3Opc(AddSub, Val); 1331 Inst.addOperand(MCOperand::CreateReg(0)); 1332 Inst.addOperand(MCOperand::CreateImm(Val)); 1333 } 1334 1335 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1336 assert(N == 2 && "Invalid number of operands!"); 1337 // If we have an immediate that's not a constant, treat it as a label 1338 // reference needing a fixup. If it is a constant, it's something else 1339 // and we reject it. 1340 if (isImm()) { 1341 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1342 Inst.addOperand(MCOperand::CreateImm(0)); 1343 return; 1344 } 1345 1346 // The lower two bits are always zero and as such are not encoded. 1347 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1348 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1349 // Special case for #-0 1350 if (Val == INT32_MIN) Val = 0; 1351 if (Val < 0) Val = -Val; 1352 Val = ARM_AM::getAM5Opc(AddSub, Val); 1353 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1354 Inst.addOperand(MCOperand::CreateImm(Val)); 1355 } 1356 1357 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1358 assert(N == 2 && "Invalid number of operands!"); 1359 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1360 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1361 Inst.addOperand(MCOperand::CreateImm(Val)); 1362 } 1363 1364 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1365 assert(N == 2 && "Invalid number of operands!"); 1366 // The lower two bits are always zero and as such are not encoded. 1367 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1368 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1369 Inst.addOperand(MCOperand::CreateImm(Val)); 1370 } 1371 1372 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1373 assert(N == 2 && "Invalid number of operands!"); 1374 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1375 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1376 Inst.addOperand(MCOperand::CreateImm(Val)); 1377 } 1378 1379 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1380 addMemImm8OffsetOperands(Inst, N); 1381 } 1382 1383 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1384 addMemImm8OffsetOperands(Inst, N); 1385 } 1386 1387 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1388 assert(N == 2 && "Invalid number of operands!"); 1389 // If this is an immediate, it's a label reference. 1390 if (Kind == k_Immediate) { 1391 addExpr(Inst, getImm()); 1392 Inst.addOperand(MCOperand::CreateImm(0)); 1393 return; 1394 } 1395 1396 // Otherwise, it's a normal memory reg+offset. 1397 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1398 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1399 Inst.addOperand(MCOperand::CreateImm(Val)); 1400 } 1401 1402 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1403 assert(N == 2 && "Invalid number of operands!"); 1404 // If this is an immediate, it's a label reference. 1405 if (Kind == k_Immediate) { 1406 addExpr(Inst, getImm()); 1407 Inst.addOperand(MCOperand::CreateImm(0)); 1408 return; 1409 } 1410 1411 // Otherwise, it's a normal memory reg+offset. 1412 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1413 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1414 Inst.addOperand(MCOperand::CreateImm(Val)); 1415 } 1416 1417 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1418 assert(N == 2 && "Invalid number of operands!"); 1419 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1420 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1421 } 1422 1423 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1424 assert(N == 2 && "Invalid number of operands!"); 1425 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1426 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1427 } 1428 1429 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1430 assert(N == 3 && "Invalid number of operands!"); 1431 unsigned Val = 1432 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1433 Memory.ShiftImm, Memory.ShiftType); 1434 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1435 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1436 Inst.addOperand(MCOperand::CreateImm(Val)); 1437 } 1438 1439 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1440 assert(N == 3 && "Invalid number of operands!"); 1441 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1442 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1443 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1444 } 1445 1446 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1447 assert(N == 2 && "Invalid number of operands!"); 1448 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1449 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1450 } 1451 1452 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1453 assert(N == 2 && "Invalid number of operands!"); 1454 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1455 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1456 Inst.addOperand(MCOperand::CreateImm(Val)); 1457 } 1458 1459 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1460 assert(N == 2 && "Invalid number of operands!"); 1461 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1462 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1463 Inst.addOperand(MCOperand::CreateImm(Val)); 1464 } 1465 1466 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1467 assert(N == 2 && "Invalid number of operands!"); 1468 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1469 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1470 Inst.addOperand(MCOperand::CreateImm(Val)); 1471 } 1472 1473 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1474 assert(N == 2 && "Invalid number of operands!"); 1475 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1476 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1477 Inst.addOperand(MCOperand::CreateImm(Val)); 1478 } 1479 1480 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1481 assert(N == 1 && "Invalid number of operands!"); 1482 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1483 assert(CE && "non-constant post-idx-imm8 operand!"); 1484 int Imm = CE->getValue(); 1485 bool isAdd = Imm >= 0; 1486 if (Imm == INT32_MIN) Imm = 0; 1487 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1488 Inst.addOperand(MCOperand::CreateImm(Imm)); 1489 } 1490 1491 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1492 assert(N == 1 && "Invalid number of operands!"); 1493 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1494 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1495 int Imm = CE->getValue(); 1496 bool isAdd = Imm >= 0; 1497 if (Imm == INT32_MIN) Imm = 0; 1498 // Immediate is scaled by 4. 1499 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1500 Inst.addOperand(MCOperand::CreateImm(Imm)); 1501 } 1502 1503 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1504 assert(N == 2 && "Invalid number of operands!"); 1505 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1506 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1507 } 1508 1509 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1510 assert(N == 2 && "Invalid number of operands!"); 1511 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1512 // The sign, shift type, and shift amount are encoded in a single operand 1513 // using the AM2 encoding helpers. 1514 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1515 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1516 PostIdxReg.ShiftTy); 1517 Inst.addOperand(MCOperand::CreateImm(Imm)); 1518 } 1519 1520 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1521 assert(N == 1 && "Invalid number of operands!"); 1522 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1523 } 1524 1525 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1526 assert(N == 1 && "Invalid number of operands!"); 1527 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1528 } 1529 1530 void addVecListOneDOperands(MCInst &Inst, unsigned N) const { 1531 assert(N == 1 && "Invalid number of operands!"); 1532 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1533 } 1534 1535 void addVecListTwoDOperands(MCInst &Inst, unsigned N) const { 1536 assert(N == 1 && "Invalid number of operands!"); 1537 // Only the first register actually goes on the instruction. The rest 1538 // are implied by the opcode. 1539 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1540 } 1541 1542 void addVecListThreeDOperands(MCInst &Inst, unsigned N) const { 1543 assert(N == 1 && "Invalid number of operands!"); 1544 // Only the first register actually goes on the instruction. 
The rest 1545 // are implied by the opcode. 1546 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1547 } 1548 1549 void addVecListFourDOperands(MCInst &Inst, unsigned N) const { 1550 assert(N == 1 && "Invalid number of operands!"); 1551 // Only the first register actually goes on the instruction. The rest 1552 // are implied by the opcode. 1553 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1554 } 1555 1556 void addVecListTwoQOperands(MCInst &Inst, unsigned N) const { 1557 assert(N == 1 && "Invalid number of operands!"); 1558 // Only the first register actually goes on the instruction. The rest 1559 // are implied by the opcode. 1560 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1561 } 1562 1563 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1564 assert(N == 1 && "Invalid number of operands!"); 1565 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1566 } 1567 1568 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1569 assert(N == 1 && "Invalid number of operands!"); 1570 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1571 } 1572 1573 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1574 assert(N == 1 && "Invalid number of operands!"); 1575 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1576 } 1577 1578 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1579 assert(N == 1 && "Invalid number of operands!"); 1580 // The immediate encodes the type of constant as well as the value. 1581 // Mask in that this is an i8 splat. 1582 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1583 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1584 } 1585 1586 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1587 assert(N == 1 && "Invalid number of operands!"); 1588 // The immediate encodes the type of constant as well as the value. 
1589 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1590 unsigned Value = CE->getValue(); 1591 if (Value >= 256) 1592 Value = (Value >> 8) | 0xa00; 1593 else 1594 Value |= 0x800; 1595 Inst.addOperand(MCOperand::CreateImm(Value)); 1596 } 1597 1598 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1599 assert(N == 1 && "Invalid number of operands!"); 1600 // The immediate encodes the type of constant as well as the value. 1601 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1602 unsigned Value = CE->getValue(); 1603 if (Value >= 256 && Value <= 0xff00) 1604 Value = (Value >> 8) | 0x200; 1605 else if (Value > 0xffff && Value <= 0xff0000) 1606 Value = (Value >> 16) | 0x400; 1607 else if (Value > 0xffffff) 1608 Value = (Value >> 24) | 0x600; 1609 Inst.addOperand(MCOperand::CreateImm(Value)); 1610 } 1611 1612 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1613 assert(N == 1 && "Invalid number of operands!"); 1614 // The immediate encodes the type of constant as well as the value. 1615 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1616 unsigned Value = CE->getValue(); 1617 if (Value >= 256 && Value <= 0xffff) 1618 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 1619 else if (Value > 0xffff && Value <= 0xffffff) 1620 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1621 else if (Value > 0xffffff) 1622 Value = (Value >> 24) | 0x600; 1623 Inst.addOperand(MCOperand::CreateImm(Value)); 1624 } 1625 1626 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 1627 assert(N == 1 && "Invalid number of operands!"); 1628 // The immediate encodes the type of constant as well as the value. 
1629 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1630 uint64_t Value = CE->getValue(); 1631 unsigned Imm = 0; 1632 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 1633 Imm |= (Value & 1) << i; 1634 } 1635 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 1636 } 1637 1638 virtual void print(raw_ostream &OS) const; 1639 1640 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1641 ARMOperand *Op = new ARMOperand(k_ITCondMask); 1642 Op->ITMask.Mask = Mask; 1643 Op->StartLoc = S; 1644 Op->EndLoc = S; 1645 return Op; 1646 } 1647 1648 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1649 ARMOperand *Op = new ARMOperand(k_CondCode); 1650 Op->CC.Val = CC; 1651 Op->StartLoc = S; 1652 Op->EndLoc = S; 1653 return Op; 1654 } 1655 1656 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1657 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1658 Op->Cop.Val = CopVal; 1659 Op->StartLoc = S; 1660 Op->EndLoc = S; 1661 return Op; 1662 } 1663 1664 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1665 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1666 Op->Cop.Val = CopVal; 1667 Op->StartLoc = S; 1668 Op->EndLoc = S; 1669 return Op; 1670 } 1671 1672 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1673 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1674 Op->Cop.Val = Val; 1675 Op->StartLoc = S; 1676 Op->EndLoc = E; 1677 return Op; 1678 } 1679 1680 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 1681 ARMOperand *Op = new ARMOperand(k_CCOut); 1682 Op->Reg.RegNum = RegNum; 1683 Op->StartLoc = S; 1684 Op->EndLoc = S; 1685 return Op; 1686 } 1687 1688 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 1689 ARMOperand *Op = new ARMOperand(k_Token); 1690 Op->Tok.Data = Str.data(); 1691 Op->Tok.Length = Str.size(); 1692 Op->StartLoc = S; 1693 Op->EndLoc = S; 1694 return Op; 1695 } 1696 1697 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 1698 ARMOperand *Op = new 
ARMOperand(k_Register);
    // (Tail of CreateReg: record the register and its source range.)
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register operand shifted by another register,
  /// e.g. "r0, lsl r1".
  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register operand shifted by an immediate,
  /// e.g. "r0, lsl #4".
  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a shifter-immediate operand ("lsl #n" / "asr #n" style,
  /// as used by SSAT/USAT).
  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a rotate-immediate operand ("ror #n" style).
  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a bitfield descriptor operand ("#lsb, #width" for BFC/BFI).
  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register-list operand from (register, location) pairs.
  /// The list kind (GPR/DPR/SPR) is selected from the register class of
  /// the first register; the list is then sorted into ascending enum order.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    // Keep the list sorted so the matcher sees a canonical order.
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  /// Create a NEON vector-list operand of Count D registers starting at
  /// RegNum.
  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector lane-index operand (the "[n]" suffix).
  /// NOTE(review): the Ctx parameter is currently unused here.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an immediate operand from an arbitrary MCExpr.
  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a VFP floating-point immediate operand (pre-encoded value).
  /// Start and end location are the same single token.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a memory operand: base register plus optional immediate or
  /// (possibly shifted) register offset, alignment, and offset sign.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a post-indexed register operand, e.g. the "r1" in
  /// "ldr r0, [r2], r1".
  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a DMB/DSB memory-barrier option operand (e.g. "ish").
  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a CPS interrupt-flags operand (any subset of a/i/f).
  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create an MSR mask operand (special register selector plus field mask).
  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.
1878 1879void ARMOperand::print(raw_ostream &OS) const { 1880 switch (Kind) { 1881 case k_FPImmediate: 1882 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1883 << ") >"; 1884 break; 1885 case k_CondCode: 1886 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1887 break; 1888 case k_CCOut: 1889 OS << "<ccout " << getReg() << ">"; 1890 break; 1891 case k_ITCondMask: { 1892 static const char *MaskStr[] = { 1893 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)", 1894 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)" 1895 }; 1896 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1897 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1898 break; 1899 } 1900 case k_CoprocNum: 1901 OS << "<coprocessor number: " << getCoproc() << ">"; 1902 break; 1903 case k_CoprocReg: 1904 OS << "<coprocessor register: " << getCoproc() << ">"; 1905 break; 1906 case k_CoprocOption: 1907 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1908 break; 1909 case k_MSRMask: 1910 OS << "<mask: " << getMSRMask() << ">"; 1911 break; 1912 case k_Immediate: 1913 getImm()->print(OS); 1914 break; 1915 case k_MemBarrierOpt: 1916 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1917 break; 1918 case k_Memory: 1919 OS << "<memory " 1920 << " base:" << Memory.BaseRegNum; 1921 OS << ">"; 1922 break; 1923 case k_PostIndexRegister: 1924 OS << "post-idx register " << (PostIdxReg.isAdd ? 
"" : "-") 1925 << PostIdxReg.RegNum; 1926 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1927 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1928 << PostIdxReg.ShiftImm; 1929 OS << ">"; 1930 break; 1931 case k_ProcIFlags: { 1932 OS << "<ARM_PROC::"; 1933 unsigned IFlags = getProcIFlags(); 1934 for (int i=2; i >= 0; --i) 1935 if (IFlags & (1 << i)) 1936 OS << ARM_PROC::IFlagsToString(1 << i); 1937 OS << ">"; 1938 break; 1939 } 1940 case k_Register: 1941 OS << "<register " << getReg() << ">"; 1942 break; 1943 case k_ShifterImmediate: 1944 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 1945 << " #" << ShifterImm.Imm << ">"; 1946 break; 1947 case k_ShiftedRegister: 1948 OS << "<so_reg_reg " 1949 << RegShiftedReg.SrcReg 1950 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1951 << ", " << RegShiftedReg.ShiftReg << ", " 1952 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1953 << ">"; 1954 break; 1955 case k_ShiftedImmediate: 1956 OS << "<so_reg_imm " 1957 << RegShiftedImm.SrcReg 1958 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1959 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1960 << ">"; 1961 break; 1962 case k_RotateImmediate: 1963 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1964 break; 1965 case k_BitfieldDescriptor: 1966 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1967 << ", width: " << Bitfield.Width << ">"; 1968 break; 1969 case k_RegisterList: 1970 case k_DPRRegisterList: 1971 case k_SPRRegisterList: { 1972 OS << "<register_list "; 1973 1974 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1975 for (SmallVectorImpl<unsigned>::const_iterator 1976 I = RegList.begin(), E = RegList.end(); I != E; ) { 1977 OS << *I; 1978 if (++I < E) OS << ", "; 1979 } 1980 1981 OS << ">"; 1982 break; 1983 } 1984 case k_VectorList: 1985 OS << "<vector_list " << VectorList.Count << " * " 1986 << VectorList.RegNum << ">"; 1987 break; 1988 case k_Token: 1989 OS << "'" << getToken() << "'"; 1990 
break; 1991 case k_VectorIndex: 1992 OS << "<vectorindex " << getVectorIndex() << ">"; 1993 break; 1994 } 1995} 1996 1997/// @name Auto-generated Match Functions 1998/// { 1999 2000static unsigned MatchRegisterName(StringRef Name); 2001 2002/// } 2003 2004bool ARMAsmParser::ParseRegister(unsigned &RegNo, 2005 SMLoc &StartLoc, SMLoc &EndLoc) { 2006 RegNo = tryParseRegister(); 2007 2008 return (RegNo == (unsigned)-1); 2009} 2010 2011/// Try to parse a register name. The token must be an Identifier when called, 2012/// and if it is a register name the token is eaten and the register number is 2013/// returned. Otherwise return -1. 2014/// 2015int ARMAsmParser::tryParseRegister() { 2016 const AsmToken &Tok = Parser.getTok(); 2017 if (Tok.isNot(AsmToken::Identifier)) return -1; 2018 2019 // FIXME: Validate register for the current architecture; we have to do 2020 // validation later, so maybe there is no need for this here. 2021 std::string lowerCase = Tok.getString().lower(); 2022 unsigned RegNum = MatchRegisterName(lowerCase); 2023 if (!RegNum) { 2024 RegNum = StringSwitch<unsigned>(lowerCase) 2025 .Case("r13", ARM::SP) 2026 .Case("r14", ARM::LR) 2027 .Case("r15", ARM::PC) 2028 .Case("ip", ARM::R12) 2029 .Default(0); 2030 } 2031 if (!RegNum) return -1; 2032 2033 Parser.Lex(); // Eat identifier token. 2034 2035 return RegNum; 2036} 2037 2038// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2039// If a recoverable error occurs, return 1. If an irrecoverable error 2040// occurs, return -1. An irrecoverable error is one where tokens have been 2041// consumed in the process of trying to parse the shifter (i.e., when it is 2042// indeed a shifter operand, but malformed). 
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Match the shift operator mnemonic (case-insensitive).
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this path returns 1
    // ("recoverable") even though the shift token was already consumed and
    // the previous operand popped — confirm callers tolerate this.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      // Shift amount given as a register.
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Build the combined operand: register-shifted-register if a shift
  // register was parsed (and it's not RRX), otherwise shifted-by-immediate.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name.  The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2135bool ARMAsmParser:: 2136tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2137 SMLoc S = Parser.getTok().getLoc(); 2138 int RegNo = tryParseRegister(); 2139 if (RegNo == -1) 2140 return true; 2141 2142 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2143 2144 const AsmToken &ExclaimTok = Parser.getTok(); 2145 if (ExclaimTok.is(AsmToken::Exclaim)) { 2146 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2147 ExclaimTok.getLoc())); 2148 Parser.Lex(); // Eat exclaim token 2149 return false; 2150 } 2151 2152 // Also check for an index operand. This is only legal for vector registers, 2153 // but that'll get caught OK in operand matching, so we don't need to 2154 // explicitly filter everything else out here. 2155 if (Parser.getTok().is(AsmToken::LBrac)) { 2156 SMLoc SIdx = Parser.getTok().getLoc(); 2157 Parser.Lex(); // Eat left bracket token. 2158 2159 const MCExpr *ImmVal; 2160 if (getParser().ParseExpression(ImmVal)) 2161 return MatchOperand_ParseFail; 2162 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2163 if (!MCE) { 2164 TokError("immediate value expected for vector index"); 2165 return MatchOperand_ParseFail; 2166 } 2167 2168 SMLoc E = Parser.getTok().getLoc(); 2169 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2170 Error(E, "']' expected"); 2171 return MatchOperand_ParseFail; 2172 } 2173 2174 Parser.Lex(); // Eat right bracket token. 2175 2176 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2177 SIdx, E, 2178 getContext())); 2179 } 2180 2181 return false; 2182} 2183 2184/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2185/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2186/// "c5", ... 2187static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2188 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2189 // but efficient. 
2190 switch (Name.size()) { 2191 default: break; 2192 case 2: 2193 if (Name[0] != CoprocOp) 2194 return -1; 2195 switch (Name[1]) { 2196 default: return -1; 2197 case '0': return 0; 2198 case '1': return 1; 2199 case '2': return 2; 2200 case '3': return 3; 2201 case '4': return 4; 2202 case '5': return 5; 2203 case '6': return 6; 2204 case '7': return 7; 2205 case '8': return 8; 2206 case '9': return 9; 2207 } 2208 break; 2209 case 3: 2210 if (Name[0] != CoprocOp || Name[1] != '1') 2211 return -1; 2212 switch (Name[2]) { 2213 default: return -1; 2214 case '0': return 10; 2215 case '1': return 11; 2216 case '2': return 12; 2217 case '3': return 13; 2218 case '4': return 14; 2219 case '5': return 15; 2220 } 2221 break; 2222 } 2223 2224 return -1; 2225} 2226 2227/// parseITCondCode - Try to parse a condition code for an IT instruction. 2228ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2229parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2230 SMLoc S = Parser.getTok().getLoc(); 2231 const AsmToken &Tok = Parser.getTok(); 2232 if (!Tok.is(AsmToken::Identifier)) 2233 return MatchOperand_NoMatch; 2234 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2235 .Case("eq", ARMCC::EQ) 2236 .Case("ne", ARMCC::NE) 2237 .Case("hs", ARMCC::HS) 2238 .Case("cs", ARMCC::HS) 2239 .Case("lo", ARMCC::LO) 2240 .Case("cc", ARMCC::LO) 2241 .Case("mi", ARMCC::MI) 2242 .Case("pl", ARMCC::PL) 2243 .Case("vs", ARMCC::VS) 2244 .Case("vc", ARMCC::VC) 2245 .Case("hi", ARMCC::HI) 2246 .Case("ls", ARMCC::LS) 2247 .Case("ge", ARMCC::GE) 2248 .Case("lt", ARMCC::LT) 2249 .Case("gt", ARMCC::GT) 2250 .Case("le", ARMCC::LE) 2251 .Case("al", ARMCC::AL) 2252 .Default(~0U); 2253 if (CC == ~0U) 2254 return MatchOperand_NoMatch; 2255 Parser.Lex(); // Eat the token. 2256 2257 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2258 2259 return MatchOperand_Success; 2260} 2261 2262/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
/// The token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Coprocessor numbers are spelled "p0".."p15".
  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}

/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// register ("c0".."c15"), the token is eaten and the operand is added to the
/// operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}

/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the '{'

  const MCExpr *Expr;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().ParseExpression(Expr)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  // Must evaluate to a constant in [0, 255].
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();

  // Check for and consume the closing '}'
  // NOTE(review): this path fails without emitting a diagnostic — confirm
  // whether an Error("'}' expected") should be reported here.
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return MatchOperand_ParseFail;
  SMLoc E = Parser.getTok().getLoc();
  Parser.Lex(); // Eat the '}'

  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  return MatchOperand_Success;
}

// For register list parsing, we need to map from raw GPR register numbering
// to the enumeration values. The enumeration values aren't sorted by
// register number due to our using "sp", "lr" and "pc" as canonical names.
static unsigned getNextRegister(unsigned Reg) {
  // If this is a GPR, we need to do it manually, otherwise we can rely
  // on the sort ordering of the enumeration since the other reg-classes
  // are sane.
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

// Return the low-subreg of a given Q register.
static unsigned getDRegFromQReg(unsigned QReg) {
  switch (QReg) {
  default: llvm_unreachable("expected a Q register!");
  case ARM::Q0:  return ARM::D0;
  case ARM::Q1:  return ARM::D2;
  case ARM::Q2:  return ARM::D4;
  case ARM::Q3:  return ARM::D6;
  case ARM::Q4:  return ARM::D8;
  case ARM::Q5:  return ARM::D10;
  case ARM::Q6:  return ARM::D12;
  case ARM::Q7:  return ARM::D14;
  case ARM::Q8:  return ARM::D16;
  case ARM::Q9:  return ARM::D18;
  case ARM::Q10: return ARM::D20;
  case ARM::Q11: return ARM::D22;
  case ARM::Q12: return ARM::D24;
  case ARM::Q13: return ARM::D26;
  case ARM::Q14: return ARM::D28;
  case ARM::Q15: return ARM::D30;
  }
}

/// Parse a register list. Returns true on error (a diagnostic has been
/// emitted); on success a register-list operand is appended to Operands.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // Pick the register class from the first register; every subsequent
  // register must belong to the same class.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
  return false;
}

// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E));
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A Q register is represented as its two D sub-registers.
      Reg = getDRegFromQReg(Reg);
      Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E));
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    ++Reg;
    ++Count;
  }

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      continue;
    }
    // Normal D register. Just check that it's contiguous and keep going.
    if (Reg != OldReg + 1) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef OptStr = Tok.getString();

  // Accept both the architectural names (ish, nsh, osh, ...) and the
  // older aliases (sh, un, ...).
  // NOTE(review): OptStr.slice(0, OptStr.size()) is a no-op copy of OptStr.
  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
    .Case("sy",    ARM_MB::SY)
    .Case("st",    ARM_MB::ST)
    .Case("sh",    ARM_MB::ISH)
    .Case("ish",   ARM_MB::ISH)
    .Case("shst",  ARM_MB::ISHST)
    .Case("ishst", ARM_MB::ISHST)
    .Case("nsh",   ARM_MB::NSH)
    .Case("un",    ARM_MB::NSH)
    .Case("nshst", ARM_MB::NSHST)
    .Case("unst",  ARM_MB::NSHST)
    .Case("osh",   ARM_MB::OSH)
    .Case("oshst", ARM_MB::OSHST)
    .Default(~0U);

  if (Opt == ~0U)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
  return MatchOperand_Success;
}

/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
2641ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2642parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2643 SMLoc S = Parser.getTok().getLoc(); 2644 const AsmToken &Tok = Parser.getTok(); 2645 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2646 StringRef IFlagsStr = Tok.getString(); 2647 2648 // An iflags string of "none" is interpreted to mean that none of the AIF 2649 // bits are set. Not a terribly useful instruction, but a valid encoding. 2650 unsigned IFlags = 0; 2651 if (IFlagsStr != "none") { 2652 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2653 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2654 .Case("a", ARM_PROC::A) 2655 .Case("i", ARM_PROC::I) 2656 .Case("f", ARM_PROC::F) 2657 .Default(~0U); 2658 2659 // If some specific iflag is already set, it means that some letter is 2660 // present more than once, this is not acceptable. 2661 if (Flag == ~0U || (IFlags & Flag)) 2662 return MatchOperand_NoMatch; 2663 2664 IFlags |= Flag; 2665 } 2666 } 2667 2668 Parser.Lex(); // Eat identifier token. 2669 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2670 return MatchOperand_Success; 2671} 2672 2673/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
/// parseMSRMaskOperand - Parse the mask operand of an MSR/MRS instruction.
/// For M-class cores this is a bare system register name (apsr, primask,
/// control, ...); for other cores it is APSR/CPSR/SPSR optionally followed
/// by '_' and a combination of the flag letters c/x/s/f.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef Mask = Tok.getString();

  if (isMClass()) {
    // M-class system registers are encoded as a flat SYSm-style value.
    // See ARMv6-M 10.1.1
    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
      .Case("apsr", 0)
      .Case("iapsr", 1)
      .Case("eapsr", 2)
      .Case("xpsr", 3)
      .Case("ipsr", 5)
      .Case("epsr", 6)
      .Case("iepsr", 7)
      .Case("msp", 8)
      .Case("psp", 9)
      .Case("primask", 16)
      .Case("basepri", 17)
      .Case("basepri_max", 18)
      .Case("faultmask", 19)
      .Case("control", 20)
      .Default(~0U);

    // Unknown register name: let other operand parsers have a go.
    if (FlagsVal == ~0U)
      return MatchOperand_NoMatch;

    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
      // basepri, basepri_max and faultmask only valid for V7m.
      return MatchOperand_NoMatch;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR accepts only the architecturally defined flag groups below.
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq",  0x8) // same as CPSR_f
    .Case("g", 0x4)  // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
      Flags = "fc";
    // Accumulate one mask bit per flag letter; each letter may appear once.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (FlagsVal == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}

/// parsePKHImm - Parse the shift operand of a PKH instruction: the shift
/// keyword given by 'Op' (matched case-insensitively as all-lower or
/// all-upper), then '#' and a constant in [Low,High].
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
            int Low, int High) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), Op + " operand expected.");
    return MatchOperand_ParseFail;
  }
  StringRef ShiftName = Tok.getString();
  std::string LowerOp = Op.lower();
  std::string UpperOp = Op.upper();
  // Only the exact lower-case or upper-case spelling is accepted.
  if (ShiftName != LowerOp && ShiftName != UpperOp) {
    Error(Parser.getTok().getLoc(), Op + " operand expected.");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat shift type token.

  // There must be a '#' and a shift amount.
  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *ShiftAmount;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().ParseExpression(ShiftAmount)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  // The shift amount must resolve to a compile-time constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE) {
    Error(Loc, "constant expression expected");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();
  if (Val < Low || Val > High) {
    Error(Loc, "immediate value out of range");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));

  return MatchOperand_Success;
}

/// parseSetEndImm - Parse the operand of a SETEND instruction: the
/// identifier 'be' (encoded as 1) or 'le' (encoded as 0).
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  if (Tok.isNot(AsmToken::Identifier)) {
    Error(Tok.getLoc(), "'be' or 'le' operand expected");
    return MatchOperand_ParseFail;
  }
  int Val = StringSwitch<int>(Tok.getString())
    .Case("be", 1)
    .Case("le", 0)
    .Default(-1);
  Parser.Lex(); // Eat the token.

  if (Val == -1) {
    Error(Tok.getLoc(), "'be' or 'le' operand expected");
    return MatchOperand_ParseFail;
  }
  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
                                                                  getContext()),
                                           S, Parser.getTok().getLoc()));
  return MatchOperand_Success;
}

/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
/// instructions. Legal values are:
///     lsl #n  'n' in [0,31]
///     asr #n  'n' in [1,32]
///             n == 32 encoded as n == 0.
2849ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2850parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2851 const AsmToken &Tok = Parser.getTok(); 2852 SMLoc S = Tok.getLoc(); 2853 if (Tok.isNot(AsmToken::Identifier)) { 2854 Error(S, "shift operator 'asr' or 'lsl' expected"); 2855 return MatchOperand_ParseFail; 2856 } 2857 StringRef ShiftName = Tok.getString(); 2858 bool isASR; 2859 if (ShiftName == "lsl" || ShiftName == "LSL") 2860 isASR = false; 2861 else if (ShiftName == "asr" || ShiftName == "ASR") 2862 isASR = true; 2863 else { 2864 Error(S, "shift operator 'asr' or 'lsl' expected"); 2865 return MatchOperand_ParseFail; 2866 } 2867 Parser.Lex(); // Eat the operator. 2868 2869 // A '#' and a shift amount. 2870 if (Parser.getTok().isNot(AsmToken::Hash)) { 2871 Error(Parser.getTok().getLoc(), "'#' expected"); 2872 return MatchOperand_ParseFail; 2873 } 2874 Parser.Lex(); // Eat hash token. 2875 2876 const MCExpr *ShiftAmount; 2877 SMLoc E = Parser.getTok().getLoc(); 2878 if (getParser().ParseExpression(ShiftAmount)) { 2879 Error(E, "malformed shift expression"); 2880 return MatchOperand_ParseFail; 2881 } 2882 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2883 if (!CE) { 2884 Error(E, "shift amount must be an immediate"); 2885 return MatchOperand_ParseFail; 2886 } 2887 2888 int64_t Val = CE->getValue(); 2889 if (isASR) { 2890 // Shift amount must be in [1,32] 2891 if (Val < 1 || Val > 32) { 2892 Error(E, "'asr' shift amount must be in range [1,32]"); 2893 return MatchOperand_ParseFail; 2894 } 2895 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2896 if (isThumb() && Val == 32) { 2897 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2898 return MatchOperand_ParseFail; 2899 } 2900 if (Val == 32) Val = 0; 2901 } else { 2902 // Shift amount must be in [1,32] 2903 if (Val < 0 || Val > 31) { 2904 Error(E, "'lsr' shift amount must be in range [0,31]"); 2905 return MatchOperand_ParseFail; 2906 } 2907 } 2908 2909 E = Parser.getTok().getLoc(); 2910 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2911 2912 return MatchOperand_Success; 2913} 2914 2915/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2916/// of instructions. Legal values are: 2917/// ror #n 'n' in {0, 8, 16, 24} 2918ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2919parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2920 const AsmToken &Tok = Parser.getTok(); 2921 SMLoc S = Tok.getLoc(); 2922 if (Tok.isNot(AsmToken::Identifier)) 2923 return MatchOperand_NoMatch; 2924 StringRef ShiftName = Tok.getString(); 2925 if (ShiftName != "ror" && ShiftName != "ROR") 2926 return MatchOperand_NoMatch; 2927 Parser.Lex(); // Eat the operator. 2928 2929 // A '#' and a rotate amount. 2930 if (Parser.getTok().isNot(AsmToken::Hash)) { 2931 Error(Parser.getTok().getLoc(), "'#' expected"); 2932 return MatchOperand_ParseFail; 2933 } 2934 Parser.Lex(); // Eat hash token. 2935 2936 const MCExpr *ShiftAmount; 2937 SMLoc E = Parser.getTok().getLoc(); 2938 if (getParser().ParseExpression(ShiftAmount)) { 2939 Error(E, "malformed rotate expression"); 2940 return MatchOperand_ParseFail; 2941 } 2942 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2943 if (!CE) { 2944 Error(E, "rotate amount must be an immediate"); 2945 return MatchOperand_ParseFail; 2946 } 2947 2948 int64_t Val = CE->getValue(); 2949 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2950 // normally, zero is represented in asm by omitting the rotate operand 2951 // entirely. 
2952 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2953 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2954 return MatchOperand_ParseFail; 2955 } 2956 2957 E = Parser.getTok().getLoc(); 2958 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2959 2960 return MatchOperand_Success; 2961} 2962 2963ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2964parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2965 SMLoc S = Parser.getTok().getLoc(); 2966 // The bitfield descriptor is really two operands, the LSB and the width. 2967 if (Parser.getTok().isNot(AsmToken::Hash)) { 2968 Error(Parser.getTok().getLoc(), "'#' expected"); 2969 return MatchOperand_ParseFail; 2970 } 2971 Parser.Lex(); // Eat hash token. 2972 2973 const MCExpr *LSBExpr; 2974 SMLoc E = Parser.getTok().getLoc(); 2975 if (getParser().ParseExpression(LSBExpr)) { 2976 Error(E, "malformed immediate expression"); 2977 return MatchOperand_ParseFail; 2978 } 2979 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2980 if (!CE) { 2981 Error(E, "'lsb' operand must be an immediate"); 2982 return MatchOperand_ParseFail; 2983 } 2984 2985 int64_t LSB = CE->getValue(); 2986 // The LSB must be in the range [0,31] 2987 if (LSB < 0 || LSB > 31) { 2988 Error(E, "'lsb' operand must be in the range [0,31]"); 2989 return MatchOperand_ParseFail; 2990 } 2991 E = Parser.getTok().getLoc(); 2992 2993 // Expect another immediate operand. 2994 if (Parser.getTok().isNot(AsmToken::Comma)) { 2995 Error(Parser.getTok().getLoc(), "too few operands"); 2996 return MatchOperand_ParseFail; 2997 } 2998 Parser.Lex(); // Eat hash token. 2999 if (Parser.getTok().isNot(AsmToken::Hash)) { 3000 Error(Parser.getTok().getLoc(), "'#' expected"); 3001 return MatchOperand_ParseFail; 3002 } 3003 Parser.Lex(); // Eat hash token. 
3004 3005 const MCExpr *WidthExpr; 3006 if (getParser().ParseExpression(WidthExpr)) { 3007 Error(E, "malformed immediate expression"); 3008 return MatchOperand_ParseFail; 3009 } 3010 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3011 if (!CE) { 3012 Error(E, "'width' operand must be an immediate"); 3013 return MatchOperand_ParseFail; 3014 } 3015 3016 int64_t Width = CE->getValue(); 3017 // The LSB must be in the range [1,32-lsb] 3018 if (Width < 1 || Width > 32 - LSB) { 3019 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3020 return MatchOperand_ParseFail; 3021 } 3022 E = Parser.getTok().getLoc(); 3023 3024 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3025 3026 return MatchOperand_Success; 3027} 3028 3029ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3030parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3031 // Check for a post-index addressing register operand. Specifically: 3032 // postidx_reg := '+' register {, shift} 3033 // | '-' register {, shift} 3034 // | register {, shift} 3035 3036 // This method must return MatchOperand_NoMatch without consuming any tokens 3037 // in the case where there is no match, as other alternatives take other 3038 // parse methods. 3039 AsmToken Tok = Parser.getTok(); 3040 SMLoc S = Tok.getLoc(); 3041 bool haveEaten = false; 3042 bool isAdd = true; 3043 int Reg = -1; 3044 if (Tok.is(AsmToken::Plus)) { 3045 Parser.Lex(); // Eat the '+' token. 3046 haveEaten = true; 3047 } else if (Tok.is(AsmToken::Minus)) { 3048 Parser.Lex(); // Eat the '-' token. 
3049 isAdd = false; 3050 haveEaten = true; 3051 } 3052 if (Parser.getTok().is(AsmToken::Identifier)) 3053 Reg = tryParseRegister(); 3054 if (Reg == -1) { 3055 if (!haveEaten) 3056 return MatchOperand_NoMatch; 3057 Error(Parser.getTok().getLoc(), "register expected"); 3058 return MatchOperand_ParseFail; 3059 } 3060 SMLoc E = Parser.getTok().getLoc(); 3061 3062 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3063 unsigned ShiftImm = 0; 3064 if (Parser.getTok().is(AsmToken::Comma)) { 3065 Parser.Lex(); // Eat the ','. 3066 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3067 return MatchOperand_ParseFail; 3068 } 3069 3070 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3071 ShiftImm, S, E)); 3072 3073 return MatchOperand_Success; 3074} 3075 3076ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3077parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3078 // Check for a post-index addressing register operand. Specifically: 3079 // am3offset := '+' register 3080 // | '-' register 3081 // | register 3082 // | # imm 3083 // | # + imm 3084 // | # - imm 3085 3086 // This method must return MatchOperand_NoMatch without consuming any tokens 3087 // in the case where there is no match, as other alternatives take other 3088 // parse methods. 3089 AsmToken Tok = Parser.getTok(); 3090 SMLoc S = Tok.getLoc(); 3091 3092 // Do immediates first, as we always parse those if we have a '#'. 3093 if (Parser.getTok().is(AsmToken::Hash)) { 3094 Parser.Lex(); // Eat the '#'. 3095 // Explicitly look for a '-', as we need to encode negative zero 3096 // differently. 
3097 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3098 const MCExpr *Offset; 3099 if (getParser().ParseExpression(Offset)) 3100 return MatchOperand_ParseFail; 3101 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3102 if (!CE) { 3103 Error(S, "constant expression expected"); 3104 return MatchOperand_ParseFail; 3105 } 3106 SMLoc E = Tok.getLoc(); 3107 // Negative zero is encoded as the flag value INT32_MIN. 3108 int32_t Val = CE->getValue(); 3109 if (isNegative && Val == 0) 3110 Val = INT32_MIN; 3111 3112 Operands.push_back( 3113 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3114 3115 return MatchOperand_Success; 3116 } 3117 3118 3119 bool haveEaten = false; 3120 bool isAdd = true; 3121 int Reg = -1; 3122 if (Tok.is(AsmToken::Plus)) { 3123 Parser.Lex(); // Eat the '+' token. 3124 haveEaten = true; 3125 } else if (Tok.is(AsmToken::Minus)) { 3126 Parser.Lex(); // Eat the '-' token. 3127 isAdd = false; 3128 haveEaten = true; 3129 } 3130 if (Parser.getTok().is(AsmToken::Identifier)) 3131 Reg = tryParseRegister(); 3132 if (Reg == -1) { 3133 if (!haveEaten) 3134 return MatchOperand_NoMatch; 3135 Error(Parser.getTok().getLoc(), "register expected"); 3136 return MatchOperand_ParseFail; 3137 } 3138 SMLoc E = Parser.getTok().getLoc(); 3139 3140 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3141 0, S, E)); 3142 3143 return MatchOperand_Success; 3144} 3145 3146/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3147/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3148/// when they refer multiple MIOperands inside a single one. 3149bool ARMAsmParser:: 3150cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3151 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3152 // Rt, Rt2 3153 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3154 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3155 // Create a writeback register dummy placeholder. 
3156 Inst.addOperand(MCOperand::CreateReg(0)); 3157 // addr 3158 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3159 // pred 3160 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3161 return true; 3162} 3163 3164/// cvtT2StrdPre - Convert parsed operands to MCInst. 3165/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3166/// when they refer multiple MIOperands inside a single one. 3167bool ARMAsmParser:: 3168cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3169 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3170 // Create a writeback register dummy placeholder. 3171 Inst.addOperand(MCOperand::CreateReg(0)); 3172 // Rt, Rt2 3173 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3174 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3175 // addr 3176 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3177 // pred 3178 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3179 return true; 3180} 3181 3182/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3183/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3184/// when they refer multiple MIOperands inside a single one. 3185bool ARMAsmParser:: 3186cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3187 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3188 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3189 3190 // Create a writeback register dummy placeholder. 3191 Inst.addOperand(MCOperand::CreateImm(0)); 3192 3193 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3194 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3195 return true; 3196} 3197 3198/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3199/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3200/// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiple- Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  // Rd
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // CPSR out (the optional 's' suffix)
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
      ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rm is tied to Rd: repeat the destination register operand.
  Inst.addOperand(Inst.getOperand(0));
  // pred
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression, return false if successful else return true
The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Bare "[Rn]" form: no offset, no shift, no alignment.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Map the alignment in bits (as written in asm) to bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
    case 64:  Align = 8;  break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    // Remember a leading '-' so "#-0" can be distinguished from "#0" below.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// return true if it parses a shift otherwise it returns false.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  if (ShiftName == "lsl" || ShiftName == "LSL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
3715 const AsmToken &HashTok = Parser.getTok(); 3716 if (HashTok.isNot(AsmToken::Hash)) 3717 return Error(HashTok.getLoc(), "'#' expected"); 3718 Parser.Lex(); // Eat hash token. 3719 3720 const MCExpr *Expr; 3721 if (getParser().ParseExpression(Expr)) 3722 return true; 3723 // Range check the immediate. 3724 // lsl, ror: 0 <= imm <= 31 3725 // lsr, asr: 0 <= imm <= 32 3726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3727 if (!CE) 3728 return Error(Loc, "shift amount must be an immediate"); 3729 int64_t Imm = CE->getValue(); 3730 if (Imm < 0 || 3731 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3732 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3733 return Error(Loc, "immediate shift value out of range"); 3734 Amount = Imm; 3735 } 3736 3737 return false; 3738} 3739 3740/// parseFPImm - A floating point immediate expression operand. 3741ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3742parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3743 SMLoc S = Parser.getTok().getLoc(); 3744 3745 if (Parser.getTok().isNot(AsmToken::Hash)) 3746 return MatchOperand_NoMatch; 3747 3748 // Disambiguate the VMOV forms that can accept an FP immediate. 3749 // vmov.f32 <sreg>, #imm 3750 // vmov.f64 <dreg>, #imm 3751 // vmov.f32 <dreg>, #imm @ vector f32x2 3752 // vmov.f32 <qreg>, #imm @ vector f32x4 3753 // 3754 // There are also the NEON VMOV instructions which expect an 3755 // integer constant. Make sure we don't try to parse an FPImm 3756 // for these: 3757 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3758 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3759 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3760 TyOp->getToken() != ".f64")) 3761 return MatchOperand_NoMatch; 3762 3763 Parser.Lex(); // Eat the '#'. 3764 3765 // Handle negation, as that still comes through as a separate token. 
3766 bool isNegative = false; 3767 if (Parser.getTok().is(AsmToken::Minus)) { 3768 isNegative = true; 3769 Parser.Lex(); 3770 } 3771 const AsmToken &Tok = Parser.getTok(); 3772 if (Tok.is(AsmToken::Real)) { 3773 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3774 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3775 // If we had a '-' in front, toggle the sign bit. 3776 IntVal ^= (uint64_t)isNegative << 63; 3777 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3778 Parser.Lex(); // Eat the token. 3779 if (Val == -1) { 3780 TokError("floating point value out of range"); 3781 return MatchOperand_ParseFail; 3782 } 3783 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3784 return MatchOperand_Success; 3785 } 3786 if (Tok.is(AsmToken::Integer)) { 3787 int64_t Val = Tok.getIntVal(); 3788 Parser.Lex(); // Eat the token. 3789 if (Val > 255 || Val < 0) { 3790 TokError("encoded floating point value out of range"); 3791 return MatchOperand_ParseFail; 3792 } 3793 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3794 return MatchOperand_Success; 3795 } 3796 3797 TokError("invalid floating point immediate"); 3798 return MatchOperand_ParseFail; 3799} 3800/// Parse a arm instruction operand. For now this parses the operand regardless 3801/// of the mnemonic. 3802bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3803 StringRef Mnemonic) { 3804 SMLoc S, E; 3805 3806 // Check if the current operand has a custom associated parser, if so, try to 3807 // custom parse the operand, or fallback to the general approach. 3808 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3809 if (ResTy == MatchOperand_Success) 3810 return false; 3811 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3812 // there was a match, but an error occurred, in which case, just return that 3813 // the operand parsing failed. 
3814 if (ResTy == MatchOperand_ParseFail) 3815 return true; 3816 3817 switch (getLexer().getKind()) { 3818 default: 3819 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3820 return true; 3821 case AsmToken::Identifier: { 3822 // If this is VMRS, check for the apsr_nzcv operand. 3823 if (!tryParseRegisterWithWriteBack(Operands)) 3824 return false; 3825 int Res = tryParseShiftRegister(Operands); 3826 if (Res == 0) // success 3827 return false; 3828 else if (Res == -1) // irrecoverable error 3829 return true; 3830 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3831 S = Parser.getTok().getLoc(); 3832 Parser.Lex(); 3833 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3834 return false; 3835 } 3836 3837 // Fall though for the Identifier case that is not a register or a 3838 // special name. 3839 } 3840 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) 3841 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3842 case AsmToken::String: // quoted label names. 3843 case AsmToken::Dot: { // . as a branch target 3844 // This was not a register so parse other operands that start with an 3845 // identifier (like labels) as expressions and create them as immediates. 3846 const MCExpr *IdVal; 3847 S = Parser.getTok().getLoc(); 3848 if (getParser().ParseExpression(IdVal)) 3849 return true; 3850 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3851 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3852 return false; 3853 } 3854 case AsmToken::LBrac: 3855 return parseMemory(Operands); 3856 case AsmToken::LCurly: 3857 return parseRegisterList(Operands); 3858 case AsmToken::Hash: { 3859 // #42 -> immediate. 
3860 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3861 S = Parser.getTok().getLoc(); 3862 Parser.Lex(); 3863 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3864 const MCExpr *ImmVal; 3865 if (getParser().ParseExpression(ImmVal)) 3866 return true; 3867 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3868 if (CE) { 3869 int32_t Val = CE->getValue(); 3870 if (isNegative && Val == 0) 3871 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3872 } 3873 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3874 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3875 return false; 3876 } 3877 case AsmToken::Colon: { 3878 // ":lower16:" and ":upper16:" expression prefixes 3879 // FIXME: Check it's an expression prefix, 3880 // e.g. (FOO - :lower16:BAR) isn't legal. 3881 ARMMCExpr::VariantKind RefKind; 3882 if (parsePrefix(RefKind)) 3883 return true; 3884 3885 const MCExpr *SubExprVal; 3886 if (getParser().ParseExpression(SubExprVal)) 3887 return true; 3888 3889 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3890 getContext()); 3891 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3892 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3893 return false; 3894 } 3895 } 3896} 3897 3898// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3899// :lower16: and :upper16:. 
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  // Default to no variant; only overwritten once a valid prefix is seen.
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  // The prefix must be terminated by a second ':'.
  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// Returns the stripped, canonical mnemonic; the split-off pieces are
/// reported through the reference parameters (PredicationCode defaults to
/// ARMCC::AL, CarrySetting to false, ProcessorIMod to 0; ITMask is set only
/// for "it" mnemonics). NOTE: the stripping steps below are order-dependent —
/// predication is split before the 's' suffix, which is split before the cps
/// imod and the IT mask.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // Note: "cs" and "cc" are assembler aliases for "hs" and "lo".
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have an interrupt mode operand which is glued
  // into the mnemonic. Check if this is the case, split it and parse the
  // imod op.
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code ("ie"/"id" for interrupt enable/disable).
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
4030void ARMAsmParser:: 4031getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4032 bool &CanAcceptPredicationCode) { 4033 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4034 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4035 Mnemonic == "add" || Mnemonic == "adc" || 4036 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4037 Mnemonic == "orr" || Mnemonic == "mvn" || 4038 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4039 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4040 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4041 Mnemonic == "mla" || Mnemonic == "smlal" || 4042 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4043 CanAcceptCarrySet = true; 4044 } else 4045 CanAcceptCarrySet = false; 4046 4047 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4048 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4049 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4050 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4051 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4052 (Mnemonic == "clrex" && !isThumb()) || 4053 (Mnemonic == "nop" && isThumbOne()) || 4054 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4055 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4056 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4057 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4058 !isThumb()) || 4059 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4060 CanAcceptPredicationCode = false; 4061 } else 4062 CanAcceptPredicationCode = true; 4063 4064 if (isThumb()) { 4065 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4066 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4067 CanAcceptPredicationCode = false; 4068 } 4069} 4070 4071bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4072 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4073 // FIXME: This is all horribly hacky. We really need a better way to deal 4074 // with optional operands like this in the matcher table. 4075 4076 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4077 // another does not. Specifically, the MOVW instruction does not. So we 4078 // special case it here and remove the defaulted (non-setting) cc_out 4079 // operand if that's the instruction we're trying to match. 4080 // 4081 // We do this as post-processing of the explicit operands rather than just 4082 // conditionally adding the cc_out in the first place because we need 4083 // to check the type of the parsed immediate operand. 4084 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4085 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4086 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4087 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4088 return true; 4089 4090 // Register-register 'add' for thumb does not have a cc_out operand 4091 // when there are only two register operands. 4092 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4093 static_cast<ARMOperand*>(Operands[3])->isReg() && 4094 static_cast<ARMOperand*>(Operands[4])->isReg() && 4095 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4096 return true; 4097 // Register-register 'add' for thumb does not have a cc_out operand 4098 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4099 // have to check the immediate range here since Thumb2 has a variant 4100 // that can handle a different range and has a cc_out operand. 
4101 if (((isThumb() && Mnemonic == "add") || 4102 (isThumbTwo() && Mnemonic == "sub")) && 4103 Operands.size() == 6 && 4104 static_cast<ARMOperand*>(Operands[3])->isReg() && 4105 static_cast<ARMOperand*>(Operands[4])->isReg() && 4106 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4107 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4108 (static_cast<ARMOperand*>(Operands[5])->isReg() || 4109 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4110 return true; 4111 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4112 // imm0_4095 variant. That's the least-preferred variant when 4113 // selecting via the generic "add" mnemonic, so to know that we 4114 // should remove the cc_out operand, we have to explicitly check that 4115 // it's not one of the other variants. Ugh. 4116 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4117 Operands.size() == 6 && 4118 static_cast<ARMOperand*>(Operands[3])->isReg() && 4119 static_cast<ARMOperand*>(Operands[4])->isReg() && 4120 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4121 // Nest conditions rather than one big 'if' statement for readability. 4122 // 4123 // If either register is a high reg, it's either one of the SP 4124 // variants (handled above) or a 32-bit encoding, so we just 4125 // check against T3. 4126 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4127 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4128 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4129 return false; 4130 // If both registers are low, we're in an IT block, and the immediate is 4131 // in range, we should use encoding T1 instead, which has a cc_out. 
4132 if (inITBlock() && 4133 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4134 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4135 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4136 return false; 4137 4138 // Otherwise, we use encoding T4, which does not have a cc_out 4139 // operand. 4140 return true; 4141 } 4142 4143 // The thumb2 multiply instruction doesn't have a CCOut register, so 4144 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4145 // use the 16-bit encoding or not. 4146 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4147 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4148 static_cast<ARMOperand*>(Operands[3])->isReg() && 4149 static_cast<ARMOperand*>(Operands[4])->isReg() && 4150 static_cast<ARMOperand*>(Operands[5])->isReg() && 4151 // If the registers aren't low regs, the destination reg isn't the 4152 // same as one of the source regs, or the cc_out operand is zero 4153 // outside of an IT block, we have to use the 32-bit encoding, so 4154 // remove the cc_out operand. 4155 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4156 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4157 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) || 4158 !inITBlock() || 4159 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4160 static_cast<ARMOperand*>(Operands[5])->getReg() && 4161 static_cast<ARMOperand*>(Operands[3])->getReg() != 4162 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4163 return true; 4164 4165 // Also check the 'mul' syntax variant that doesn't specify an explicit 4166 // destination register. 
4167 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && 4168 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4169 static_cast<ARMOperand*>(Operands[3])->isReg() && 4170 static_cast<ARMOperand*>(Operands[4])->isReg() && 4171 // If the registers aren't low regs or the cc_out operand is zero 4172 // outside of an IT block, we have to use the 32-bit encoding, so 4173 // remove the cc_out operand. 4174 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4175 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4176 !inITBlock())) 4177 return true; 4178 4179 4180 4181 // Register-register 'add/sub' for thumb does not have a cc_out operand 4182 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4183 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4184 // right, this will result in better diagnostics (which operand is off) 4185 // anyway. 4186 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4187 (Operands.size() == 5 || Operands.size() == 6) && 4188 static_cast<ARMOperand*>(Operands[3])->isReg() && 4189 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4190 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4191 return true; 4192 4193 return false; 4194} 4195 4196static bool isDataTypeToken(StringRef Tok) { 4197 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || 4198 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || 4199 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || 4200 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || 4201 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || 4202 Tok == ".f" || Tok == ".d"; 4203} 4204 4205// FIXME: This bit should probably be handled via an explicit match class 4206// in the .td files that matches the suffix instead of having it be 4207// a literal string token the way it is now. 
// Return true if the given mnemonic completely ignores any datatype suffix
// (currently only the vldm/vstm families; the DT argument is unused here but
// kept so call sites read uniformly).
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit 0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Walk the mask string right to left, shifting in a 't'/'e' bit per
    // character; the initial 8 provides the trailing '1' terminator bit.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The location accounts for the canonical mnemonic plus the 's', if any.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
4426static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4427 unsigned HiReg, bool &containsReg) { 4428 containsReg = false; 4429 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4430 unsigned OpReg = Inst.getOperand(i).getReg(); 4431 if (OpReg == Reg) 4432 containsReg = true; 4433 // Anything other than a low register isn't legal here. 4434 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4435 return true; 4436 } 4437 return false; 4438} 4439 4440// Check if the specified regisgter is in the register list of the inst, 4441// starting at the indicated operand number. 4442static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4443 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4444 unsigned OpReg = Inst.getOperand(i).getReg(); 4445 if (OpReg == Reg) 4446 return true; 4447 } 4448 return false; 4449} 4450 4451// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4452// the ARMInsts array) instead. Getting that here requires awkward 4453// API changes, though. Better way? 4454namespace llvm { 4455extern const MCInstrDesc ARMInsts[]; 4456} 4457static const MCInstrDesc &getInstDesc(unsigned Opcode) { 4458 return ARMInsts[Opcode]; 4459} 4460 4461// FIXME: We would really like to be able to tablegen'erate this. 4462bool ARMAsmParser:: 4463validateInstruction(MCInst &Inst, 4464 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4465 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4466 SMLoc Loc = Operands[0]->getStartLoc(); 4467 // Check the IT block state first. 4468 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4469 // being allowed in IT blocks, but not being predicable. It just always 4470 // executes. 
4471 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4472 unsigned bit = 1; 4473 if (ITState.FirstCond) 4474 ITState.FirstCond = false; 4475 else 4476 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4477 // The instruction must be predicable. 4478 if (!MCID.isPredicable()) 4479 return Error(Loc, "instructions in IT block must be predicable"); 4480 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4481 unsigned ITCond = bit ? ITState.Cond : 4482 ARMCC::getOppositeCondition(ITState.Cond); 4483 if (Cond != ITCond) { 4484 // Find the condition code Operand to get its SMLoc information. 4485 SMLoc CondLoc; 4486 for (unsigned i = 1; i < Operands.size(); ++i) 4487 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4488 CondLoc = Operands[i]->getStartLoc(); 4489 return Error(CondLoc, "incorrect condition in IT block; got '" + 4490 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4491 "', but expected '" + 4492 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4493 } 4494 // Check for non-'al' condition codes outside of the IT block. 4495 } else if (isThumbTwo() && MCID.isPredicable() && 4496 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4497 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4498 Inst.getOpcode() != ARM::t2B) 4499 return Error(Loc, "predicated instructions must be in IT block"); 4500 4501 switch (Inst.getOpcode()) { 4502 case ARM::LDRD: 4503 case ARM::LDRD_PRE: 4504 case ARM::LDRD_POST: 4505 case ARM::LDREXD: { 4506 // Rt2 must be Rt + 1. 4507 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4508 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4509 if (Rt2 != Rt + 1) 4510 return Error(Operands[3]->getStartLoc(), 4511 "destination operands must be sequential"); 4512 return false; 4513 } 4514 case ARM::STRD: { 4515 // Rt2 must be Rt + 1. 
4516 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4517 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4518 if (Rt2 != Rt + 1) 4519 return Error(Operands[3]->getStartLoc(), 4520 "source operands must be sequential"); 4521 return false; 4522 } 4523 case ARM::STRD_PRE: 4524 case ARM::STRD_POST: 4525 case ARM::STREXD: { 4526 // Rt2 must be Rt + 1. 4527 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4528 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4529 if (Rt2 != Rt + 1) 4530 return Error(Operands[3]->getStartLoc(), 4531 "source operands must be sequential"); 4532 return false; 4533 } 4534 case ARM::SBFX: 4535 case ARM::UBFX: { 4536 // width must be in range [1, 32-lsb] 4537 unsigned lsb = Inst.getOperand(2).getImm(); 4538 unsigned widthm1 = Inst.getOperand(3).getImm(); 4539 if (widthm1 >= 32 - lsb) 4540 return Error(Operands[5]->getStartLoc(), 4541 "bitfield width must be in range [1,32-lsb]"); 4542 return false; 4543 } 4544 case ARM::tLDMIA: { 4545 // If we're parsing Thumb2, the .w variant is available and handles 4546 // most cases that are normally illegal for a Thumb1 LDM 4547 // instruction. We'll make the transformation in processInstruction() 4548 // if necessary. 4549 // 4550 // Thumb LDM instructions are writeback iff the base register is not 4551 // in the register list. 4552 unsigned Rn = Inst.getOperand(0).getReg(); 4553 bool hasWritebackToken = 4554 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4555 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4556 bool listContainsBase; 4557 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4558 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4559 "registers must be in range r0-r7"); 4560 // If we should have writeback, then there should be a '!' token. 
4561 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4562 return Error(Operands[2]->getStartLoc(), 4563 "writeback operator '!' expected"); 4564 // If we should not have writeback, there must not be a '!'. This is 4565 // true even for the 32-bit wide encodings. 4566 if (listContainsBase && hasWritebackToken) 4567 return Error(Operands[3]->getStartLoc(), 4568 "writeback operator '!' not allowed when base register " 4569 "in register list"); 4570 4571 break; 4572 } 4573 case ARM::t2LDMIA_UPD: { 4574 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4575 return Error(Operands[4]->getStartLoc(), 4576 "writeback operator '!' not allowed when base register " 4577 "in register list"); 4578 break; 4579 } 4580 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 4581 // so only issue a diagnostic for thumb1. The instructions will be 4582 // switched to the t2 encodings in processInstruction() if necessary. 4583 case ARM::tPOP: { 4584 bool listContainsBase; 4585 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) && 4586 !isThumbTwo()) 4587 return Error(Operands[2]->getStartLoc(), 4588 "registers must be in range r0-r7 or pc"); 4589 break; 4590 } 4591 case ARM::tPUSH: { 4592 bool listContainsBase; 4593 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) && 4594 !isThumbTwo()) 4595 return Error(Operands[2]->getStartLoc(), 4596 "registers must be in range r0-r7 or lr"); 4597 break; 4598 } 4599 case ARM::tSTMIA_UPD: { 4600 bool listContainsBase; 4601 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4602 return Error(Operands[4]->getStartLoc(), 4603 "registers must be in range r0-r7"); 4604 break; 4605 } 4606 } 4607 4608 return false; 4609} 4610 4611bool ARMAsmParser:: 4612processInstruction(MCInst &Inst, 4613 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4614 switch (Inst.getOpcode()) { 4615 // Handle the MOV complex aliases. 
4616 case ARM::ASRr: 4617 case ARM::LSRr: 4618 case ARM::LSLr: 4619 case ARM::RORr: { 4620 ARM_AM::ShiftOpc ShiftTy; 4621 switch(Inst.getOpcode()) { 4622 default: llvm_unreachable("unexpected opcode!"); 4623 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 4624 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 4625 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 4626 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 4627 } 4628 // A shift by zero is a plain MOVr, not a MOVsi. 4629 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 4630 MCInst TmpInst; 4631 TmpInst.setOpcode(ARM::MOVsr); 4632 TmpInst.addOperand(Inst.getOperand(0)); // Rd 4633 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4634 TmpInst.addOperand(Inst.getOperand(2)); // Rm 4635 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 4636 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 4637 TmpInst.addOperand(Inst.getOperand(4)); 4638 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 4639 Inst = TmpInst; 4640 return true; 4641 } 4642 case ARM::ASRi: 4643 case ARM::LSRi: 4644 case ARM::LSLi: 4645 case ARM::RORi: { 4646 ARM_AM::ShiftOpc ShiftTy; 4647 switch(Inst.getOpcode()) { 4648 default: llvm_unreachable("unexpected opcode!"); 4649 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 4650 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 4651 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 4652 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 4653 } 4654 // A shift by zero is a plain MOVr, not a MOVsi. 4655 unsigned Amt = Inst.getOperand(2).getImm(); 4656 unsigned Opc = Amt == 0 ? 
ARM::MOVr : ARM::MOVsi; 4657 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 4658 MCInst TmpInst; 4659 TmpInst.setOpcode(Opc); 4660 TmpInst.addOperand(Inst.getOperand(0)); // Rd 4661 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4662 if (Opc == ARM::MOVsi) 4663 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 4664 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 4665 TmpInst.addOperand(Inst.getOperand(4)); 4666 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 4667 Inst = TmpInst; 4668 return true; 4669 } 4670 case ARM::RRXi: { 4671 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 4672 MCInst TmpInst; 4673 TmpInst.setOpcode(ARM::MOVsi); 4674 TmpInst.addOperand(Inst.getOperand(0)); // Rd 4675 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4676 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 4677 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4678 TmpInst.addOperand(Inst.getOperand(3)); 4679 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 4680 Inst = TmpInst; 4681 return true; 4682 } 4683 case ARM::t2LDMIA_UPD: { 4684 // If this is a load of a single register, then we should use 4685 // a post-indexed LDR instruction instead, per the ARM ARM. 4686 if (Inst.getNumOperands() != 5) 4687 return false; 4688 MCInst TmpInst; 4689 TmpInst.setOpcode(ARM::t2LDR_POST); 4690 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4691 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4692 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4693 TmpInst.addOperand(MCOperand::CreateImm(4)); 4694 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4695 TmpInst.addOperand(Inst.getOperand(3)); 4696 Inst = TmpInst; 4697 return true; 4698 } 4699 case ARM::t2STMDB_UPD: { 4700 // If this is a store of a single register, then we should use 4701 // a pre-indexed STR instruction instead, per the ARM ARM. 
4702 if (Inst.getNumOperands() != 5) 4703 return false; 4704 MCInst TmpInst; 4705 TmpInst.setOpcode(ARM::t2STR_PRE); 4706 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4707 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4708 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4709 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4710 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4711 TmpInst.addOperand(Inst.getOperand(3)); 4712 Inst = TmpInst; 4713 return true; 4714 } 4715 case ARM::LDMIA_UPD: 4716 // If this is a load of a single register via a 'pop', then we should use 4717 // a post-indexed LDR instruction instead, per the ARM ARM. 4718 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4719 Inst.getNumOperands() == 5) { 4720 MCInst TmpInst; 4721 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4722 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4723 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4724 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4725 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4726 TmpInst.addOperand(MCOperand::CreateImm(4)); 4727 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4728 TmpInst.addOperand(Inst.getOperand(3)); 4729 Inst = TmpInst; 4730 return true; 4731 } 4732 break; 4733 case ARM::STMDB_UPD: 4734 // If this is a store of a single register via a 'push', then we should use 4735 // a pre-indexed STR instruction instead, per the ARM ARM. 
4736 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4737 Inst.getNumOperands() == 5) { 4738 MCInst TmpInst; 4739 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4740 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4741 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4742 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4743 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4744 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4745 TmpInst.addOperand(Inst.getOperand(3)); 4746 Inst = TmpInst; 4747 } 4748 break; 4749 case ARM::tADDi8: 4750 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4751 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4752 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4753 // to encoding T1 if <Rd> is omitted." 4754 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 4755 Inst.setOpcode(ARM::tADDi3); 4756 return true; 4757 } 4758 break; 4759 case ARM::tSUBi8: 4760 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4761 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4762 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4763 // to encoding T1 if <Rd> is omitted." 4764 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 4765 Inst.setOpcode(ARM::tSUBi3); 4766 return true; 4767 } 4768 break; 4769 case ARM::tB: 4770 // A Thumb conditional branch outside of an IT block is a tBcc. 4771 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 4772 Inst.setOpcode(ARM::tBcc); 4773 return true; 4774 } 4775 break; 4776 case ARM::t2B: 4777 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4778 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 4779 Inst.setOpcode(ARM::t2Bcc); 4780 return true; 4781 } 4782 break; 4783 case ARM::t2Bcc: 4784 // If the conditional is AL or we're in an IT block, we really want t2B. 
4785 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 4786 Inst.setOpcode(ARM::t2B); 4787 return true; 4788 } 4789 break; 4790 case ARM::tBcc: 4791 // If the conditional is AL, we really want tB. 4792 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 4793 Inst.setOpcode(ARM::tB); 4794 return true; 4795 } 4796 break; 4797 case ARM::tLDMIA: { 4798 // If the register list contains any high registers, or if the writeback 4799 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4800 // instead if we're in Thumb2. Otherwise, this should have generated 4801 // an error in validateInstruction(). 4802 unsigned Rn = Inst.getOperand(0).getReg(); 4803 bool hasWritebackToken = 4804 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4805 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4806 bool listContainsBase; 4807 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4808 (!listContainsBase && !hasWritebackToken) || 4809 (listContainsBase && hasWritebackToken)) { 4810 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4811 assert (isThumbTwo()); 4812 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4813 // If we're switching to the updating version, we need to insert 4814 // the writeback tied operand. 4815 if (hasWritebackToken) 4816 Inst.insert(Inst.begin(), 4817 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4818 return true; 4819 } 4820 break; 4821 } 4822 case ARM::tSTMIA_UPD: { 4823 // If the register list contains any high registers, we need to use 4824 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4825 // should have generated an error in validateInstruction(). 4826 unsigned Rn = Inst.getOperand(0).getReg(); 4827 bool listContainsBase; 4828 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4829 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 
4830 assert (isThumbTwo()); 4831 Inst.setOpcode(ARM::t2STMIA_UPD); 4832 return true; 4833 } 4834 break; 4835 } 4836 case ARM::tPOP: { 4837 bool listContainsBase; 4838 // If the register list contains any high registers, we need to use 4839 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4840 // should have generated an error in validateInstruction(). 4841 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 4842 return false; 4843 assert (isThumbTwo()); 4844 Inst.setOpcode(ARM::t2LDMIA_UPD); 4845 // Add the base register and writeback operands. 4846 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4847 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4848 return true; 4849 } 4850 case ARM::tPUSH: { 4851 bool listContainsBase; 4852 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 4853 return false; 4854 assert (isThumbTwo()); 4855 Inst.setOpcode(ARM::t2STMDB_UPD); 4856 // Add the base register and writeback operands. 4857 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4858 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4859 return true; 4860 } 4861 case ARM::t2MOVi: { 4862 // If we can use the 16-bit encoding and the user didn't explicitly 4863 // request the 32-bit variant, transform it here. 4864 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4865 Inst.getOperand(1).getImm() <= 255 && 4866 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4867 Inst.getOperand(4).getReg() == ARM::CPSR) || 4868 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4869 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4870 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4871 // The operands aren't in the same order for tMOVi8... 
4872 MCInst TmpInst; 4873 TmpInst.setOpcode(ARM::tMOVi8); 4874 TmpInst.addOperand(Inst.getOperand(0)); 4875 TmpInst.addOperand(Inst.getOperand(4)); 4876 TmpInst.addOperand(Inst.getOperand(1)); 4877 TmpInst.addOperand(Inst.getOperand(2)); 4878 TmpInst.addOperand(Inst.getOperand(3)); 4879 Inst = TmpInst; 4880 return true; 4881 } 4882 break; 4883 } 4884 case ARM::t2MOVr: { 4885 // If we can use the 16-bit encoding and the user didn't explicitly 4886 // request the 32-bit variant, transform it here. 4887 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4888 isARMLowRegister(Inst.getOperand(1).getReg()) && 4889 Inst.getOperand(2).getImm() == ARMCC::AL && 4890 Inst.getOperand(4).getReg() == ARM::CPSR && 4891 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4892 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4893 // The operands aren't the same for tMOV[S]r... (no cc_out) 4894 MCInst TmpInst; 4895 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 4896 TmpInst.addOperand(Inst.getOperand(0)); 4897 TmpInst.addOperand(Inst.getOperand(1)); 4898 TmpInst.addOperand(Inst.getOperand(2)); 4899 TmpInst.addOperand(Inst.getOperand(3)); 4900 Inst = TmpInst; 4901 return true; 4902 } 4903 break; 4904 } 4905 case ARM::t2SXTH: 4906 case ARM::t2SXTB: 4907 case ARM::t2UXTH: 4908 case ARM::t2UXTB: { 4909 // If we can use the 16-bit encoding and the user didn't explicitly 4910 // request the 32-bit variant, transform it here. 
4911 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4912 isARMLowRegister(Inst.getOperand(1).getReg()) && 4913 Inst.getOperand(2).getImm() == 0 && 4914 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4915 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4916 unsigned NewOpc; 4917 switch (Inst.getOpcode()) { 4918 default: llvm_unreachable("Illegal opcode!"); 4919 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4920 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4921 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4922 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4923 } 4924 // The operands aren't the same for thumb1 (no rotate operand). 4925 MCInst TmpInst; 4926 TmpInst.setOpcode(NewOpc); 4927 TmpInst.addOperand(Inst.getOperand(0)); 4928 TmpInst.addOperand(Inst.getOperand(1)); 4929 TmpInst.addOperand(Inst.getOperand(3)); 4930 TmpInst.addOperand(Inst.getOperand(4)); 4931 Inst = TmpInst; 4932 return true; 4933 } 4934 break; 4935 } 4936 case ARM::t2IT: { 4937 // The mask bits for all but the first condition are represented as 4938 // the low bit of the condition code value implies 't'. We currently 4939 // always have 1 implies 't', so XOR toggle the bits if the low bit 4940 // of the condition code is zero. The encoding also expects the low 4941 // bit of the condition to be encoded as bit 4 of the mask operand, 4942 // so mask that in if needed 4943 MCOperand &MO = Inst.getOperand(1); 4944 unsigned Mask = MO.getImm(); 4945 unsigned OrigMask = Mask; 4946 unsigned TZ = CountTrailingZeros_32(Mask); 4947 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4948 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4949 for (unsigned i = 3; i != TZ; --i) 4950 Mask ^= 1 << i; 4951 } else 4952 Mask |= 0x10; 4953 MO.setImm(Mask); 4954 4955 // Set up the IT block state according to the IT instruction we just 4956 // matched. 
4957 assert(!inITBlock() && "nested IT blocks?!"); 4958 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4959 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4960 ITState.CurPosition = 0; 4961 ITState.FirstCond = true; 4962 break; 4963 } 4964 } 4965 return false; 4966} 4967 4968unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4969 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4970 // suffix depending on whether they're in an IT block or not. 4971 unsigned Opc = Inst.getOpcode(); 4972 const MCInstrDesc &MCID = getInstDesc(Opc); 4973 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4974 assert(MCID.hasOptionalDef() && 4975 "optionally flag setting instruction missing optional def operand"); 4976 assert(MCID.NumOperands == Inst.getNumOperands() && 4977 "operand count mismatch!"); 4978 // Find the optional-def operand (cc_out). 4979 unsigned OpNo; 4980 for (OpNo = 0; 4981 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4982 ++OpNo) 4983 ; 4984 // If we're parsing Thumb1, reject it completely. 4985 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4986 return Match_MnemonicFail; 4987 // If we're parsing Thumb2, which form is legal depends on whether we're 4988 // in an IT block. 4989 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4990 !inITBlock()) 4991 return Match_RequiresITBlock; 4992 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4993 inITBlock()) 4994 return Match_RequiresNotITBlock; 4995 } 4996 // Some high-register supporting Thumb1 encodings only allow both registers 4997 // to be from r0-r7 when in Thumb2. 4998 else if (Opc == ARM::tADDhirr && isThumbOne() && 4999 isARMLowRegister(Inst.getOperand(1).getReg()) && 5000 isARMLowRegister(Inst.getOperand(2).getReg())) 5001 return Match_RequiresThumb2; 5002 // Others only require ARMv6 or later. 
5003 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 5004 isARMLowRegister(Inst.getOperand(0).getReg()) && 5005 isARMLowRegister(Inst.getOperand(1).getReg())) 5006 return Match_RequiresV6; 5007 return Match_Success; 5008} 5009 5010bool ARMAsmParser:: 5011MatchAndEmitInstruction(SMLoc IDLoc, 5012 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 5013 MCStreamer &Out) { 5014 MCInst Inst; 5015 unsigned ErrorInfo; 5016 unsigned MatchResult; 5017 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 5018 switch (MatchResult) { 5019 default: break; 5020 case Match_Success: 5021 // Context sensitive operand constraints aren't handled by the matcher, 5022 // so check them here. 5023 if (validateInstruction(Inst, Operands)) { 5024 // Still progress the IT block, otherwise one wrong condition causes 5025 // nasty cascading errors. 5026 forwardITPosition(); 5027 return true; 5028 } 5029 5030 // Some instructions need post-processing to, for example, tweak which 5031 // encoding is selected. Loop on it while changes happen so the 5032 // individual transformations can chain off each other. E.g., 5033 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 5034 while (processInstruction(Inst, Operands)) 5035 ; 5036 5037 // Only move forward at the very end so that everything in validate 5038 // and process gets a consistent answer about whether we're in an IT 5039 // block. 
5040 forwardITPosition(); 5041 5042 Out.EmitInstruction(Inst); 5043 return false; 5044 case Match_MissingFeature: 5045 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 5046 return true; 5047 case Match_InvalidOperand: { 5048 SMLoc ErrorLoc = IDLoc; 5049 if (ErrorInfo != ~0U) { 5050 if (ErrorInfo >= Operands.size()) 5051 return Error(IDLoc, "too few operands for instruction"); 5052 5053 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 5054 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 5055 } 5056 5057 return Error(ErrorLoc, "invalid operand for instruction"); 5058 } 5059 case Match_MnemonicFail: 5060 return Error(IDLoc, "invalid instruction"); 5061 case Match_ConversionFail: 5062 // The converter function will have already emited a diagnostic. 5063 return true; 5064 case Match_RequiresNotITBlock: 5065 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 5066 case Match_RequiresITBlock: 5067 return Error(IDLoc, "instruction only valid inside IT block"); 5068 case Match_RequiresV6: 5069 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 5070 case Match_RequiresThumb2: 5071 return Error(IDLoc, "instruction variant requires Thumb2"); 5072 } 5073 5074 llvm_unreachable("Implement any new match types added!"); 5075 return true; 5076} 5077 5078/// parseDirective parses the arm specific directives 5079bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 5080 StringRef IDVal = DirectiveID.getIdentifier(); 5081 if (IDVal == ".word") 5082 return parseDirectiveWord(4, DirectiveID.getLoc()); 5083 else if (IDVal == ".thumb") 5084 return parseDirectiveThumb(DirectiveID.getLoc()); 5085 else if (IDVal == ".thumb_func") 5086 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 5087 else if (IDVal == ".code") 5088 return parseDirectiveCode(DirectiveID.getLoc()); 5089 else if (IDVal == ".syntax") 5090 return parseDirectiveSyntax(DirectiveID.getLoc()); 5091 return true; 5092} 5093 5094/// parseDirectiveWord 
5095/// ::= .word [ expression (, expression)* ] 5096bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 5097 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5098 for (;;) { 5099 const MCExpr *Value; 5100 if (getParser().ParseExpression(Value)) 5101 return true; 5102 5103 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 5104 5105 if (getLexer().is(AsmToken::EndOfStatement)) 5106 break; 5107 5108 // FIXME: Improve diagnostic. 5109 if (getLexer().isNot(AsmToken::Comma)) 5110 return Error(L, "unexpected token in directive"); 5111 Parser.Lex(); 5112 } 5113 } 5114 5115 Parser.Lex(); 5116 return false; 5117} 5118 5119/// parseDirectiveThumb 5120/// ::= .thumb 5121bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 5122 if (getLexer().isNot(AsmToken::EndOfStatement)) 5123 return Error(L, "unexpected token in directive"); 5124 Parser.Lex(); 5125 5126 // TODO: set thumb mode 5127 // TODO: tell the MC streamer the mode 5128 // getParser().getStreamer().Emit???(); 5129 return false; 5130} 5131 5132/// parseDirectiveThumbFunc 5133/// ::= .thumbfunc symbol_name 5134bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 5135 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 5136 bool isMachO = MAI.hasSubsectionsViaSymbols(); 5137 StringRef Name; 5138 5139 // Darwin asm has function name after .thumb_func direction 5140 // ELF doesn't 5141 if (isMachO) { 5142 const AsmToken &Tok = Parser.getTok(); 5143 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 5144 return Error(L, "unexpected token in .thumb_func directive"); 5145 Name = Tok.getIdentifier(); 5146 Parser.Lex(); // Consume the identifier token. 
5147 } 5148 5149 if (getLexer().isNot(AsmToken::EndOfStatement)) 5150 return Error(L, "unexpected token in directive"); 5151 Parser.Lex(); 5152 5153 // FIXME: assuming function name will be the line following .thumb_func 5154 if (!isMachO) { 5155 Name = Parser.getTok().getIdentifier(); 5156 } 5157 5158 // Mark symbol as a thumb symbol. 5159 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 5160 getParser().getStreamer().EmitThumbFunc(Func); 5161 return false; 5162} 5163 5164/// parseDirectiveSyntax 5165/// ::= .syntax unified | divided 5166bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 5167 const AsmToken &Tok = Parser.getTok(); 5168 if (Tok.isNot(AsmToken::Identifier)) 5169 return Error(L, "unexpected token in .syntax directive"); 5170 StringRef Mode = Tok.getString(); 5171 if (Mode == "unified" || Mode == "UNIFIED") 5172 Parser.Lex(); 5173 else if (Mode == "divided" || Mode == "DIVIDED") 5174 return Error(L, "'.syntax divided' arm asssembly not supported"); 5175 else 5176 return Error(L, "unrecognized syntax mode in .syntax directive"); 5177 5178 if (getLexer().isNot(AsmToken::EndOfStatement)) 5179 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5180 Parser.Lex(); 5181 5182 // TODO tell the MC streamer the mode 5183 // getParser().getStreamer().Emit???(); 5184 return false; 5185} 5186 5187/// parseDirectiveCode 5188/// ::= .code 16 | 32 5189bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 5190 const AsmToken &Tok = Parser.getTok(); 5191 if (Tok.isNot(AsmToken::Integer)) 5192 return Error(L, "unexpected token in .code directive"); 5193 int64_t Val = Parser.getTok().getIntVal(); 5194 if (Val == 16) 5195 Parser.Lex(); 5196 else if (Val == 32) 5197 Parser.Lex(); 5198 else 5199 return Error(L, "invalid operand to .code directive"); 5200 5201 if (getLexer().isNot(AsmToken::EndOfStatement)) 5202 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5203 Parser.Lex(); 5204 5205 if (Val == 16) { 5206 if 
(!isThumb()) 5207 SwitchMode(); 5208 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5209 } else { 5210 if (isThumb()) 5211 SwitchMode(); 5212 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5213 } 5214 5215 return false; 5216} 5217 5218extern "C" void LLVMInitializeARMAsmLexer(); 5219 5220/// Force static initialization. 5221extern "C" void LLVMInitializeARMAsmParser() { 5222 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 5223 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 5224 LLVMInitializeARMAsmLexer(); 5225} 5226 5227#define GET_REGISTER_MATCHER 5228#define GET_MATCHER_IMPLEMENTATION 5229#include "ARMGenAsmMatcher.inc" 5230