X86AsmBackend.cpp revision 5943d4e3eea9ad5ef55618c075262337463aafa9
//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Option to allow disabling arithmetic relaxation to work around PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should be
// able to remove this once PR9807 is resolved.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instruction for X86"));

static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default: llvm_unreachable("invalid fixup kind!");
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1: return 0;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2: return 1;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_SecRel_4:
  case FK_Data_4: return 2;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8: return 3;
  }
}

namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
  StringRef CPU;
  bool HasNopl;
public:
  X86AsmBackend(const Target &T, StringRef _CPU)
    : MCAsmBackend(), CPU(_CPU) {
    HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
              CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
              CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
              CPU != "geode" && CPU != "winchip-c6" && CPU != "winchip2" &&
              CPU != "c3" && CPU != "c3-2";
  }

  unsigned getNumFixupKinds() const {
    return X86::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
      { "reloc_signed_4byte", 0, 4 * 8, 0},
      { "reloc_global_offset_table", 0, 4 * 8, 0}
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }
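
  // Fixups are applied in little-endian byte order, as the loop in applyFixup
  // below shows. For illustration: a 4-byte fixup whose resolved Value is
  // 0x12345678 is written at the fixup offset as the byte sequence
  // 78 56 34 12.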
  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value) const {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that the upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");

    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }

  bool mayNeedRelaxation(const MCInst &Inst) const;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace

static unsigned getRelaxedOpcodeBranch(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1:  return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1:  return X86::JB_4;
  case X86::JE_1:  return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1:  return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1:  return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1:  return X86::JO_4;
  case X86::JP_1:  return X86::JP_4;
  case X86::JS_1:  return X86::JS_4;
  }
}
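
// For illustration: relaxing X86::JNE_1 to X86::JNE_4 rewrites the 2-byte
// short form (opcode 75, rel8) as the 6-byte near form (opcodes 0F 85, rel32),
// widening the branch displacement from 8 to 32 bits.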

static unsigned getRelaxedOpcodeArith(unsigned Op) {
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSHi8:    return X86::PUSHi32;
  case X86::PUSHi16:   return X86::PUSHi32;
  case X86::PUSH64i8:  return X86::PUSH64i32;
  case X86::PUSH64i16: return X86::PUSH64i32;
  }
}

static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}

bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  if (MCDisableArithRelaxation)
    return false;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if the instruction has an expression operand and is not
  // RIP-relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}
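
// For illustration: a sign-extended 8-bit immediate covers -128..127, so an
// ADD32ri8 whose final value turns out to be 128 cannot stay in the imm8
// form; fixupNeedsRelaxation above detects that int8_t(128) != 128, and
// relaxInstruction then rewrites the instruction to ADD32ri.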

/// \brief Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };

  // This CPU doesn't support long nops. If needed, add more.
  // FIXME: Can we get this from the subtarget somehow?
  // FIXME: We could generate something better than plain 0x90.
  if (!HasNopl) {
    for (uint64_t i = 0; i < Count; ++i)
      OW->Write8(0x90);
    return true;
  }

  // 15 is the longest single nop instruction. Emit as many 15-byte nops as
  // needed, then emit a nop of the remaining length.
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OW->Write8(0x66);
    const uint8_t Rest = ThisNopLength - Prefixes;
    for (uint8_t i = 0; i < Rest; i++)
      OW->Write8(Nops[Rest - 1][i]);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}
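
// For illustration: a request for Count == 17 takes two iterations of the
// loop above. The first emits a 15-byte nop (Prefixes == 5, so five 0x66
// bytes, then the 10-byte "nopw %cs:..." pattern); the second emits the
// 2-byte "xchg %ax,%ax" for the remaining Count == 2.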

/* *** */

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
    : X86AsmBackend(T, CPU), OSABI(_OSABI) {
    HasReliableSymbolDifference = true;
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section);
    return ES.getFlags() & ELF::SHF_MERGE;
  }
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI,
                                    ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU)
    : X86AsmBackend(T, CPU)
    , Is64Bit(is64Bit) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86WinCOFFObjectWriter(OS, Is64Bit);
  }
};

namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame, where [RE]BP is pushed onto the stack immediately
    /// after the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // end CU namespace

class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;    ///< Stack slot size of a "push" instruction.
  unsigned PushInstrSize; ///< Size of a "push" instruction.
  unsigned MoveInstrSize; ///< Size of a "move" instruction.
  unsigned StackDivide;   ///< Amount to adjust stack size by.
protected:
  /// \brief Implementation of the algorithm to generate the compact unwind
  /// encoding for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned PrevStackSize = 0;
    unsigned NumDefCFAOffsets = 0;

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return 0;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;
        assert(MRI.getLLVMRegNum(Inst.getRegister(), true) ==
               (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!");

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        PrevStackSize = StackSize;
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize;
        break;
      }
      }
    }

    StackAdjust /= StackDivide;
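
    // At this point every CFI directive has been folded into HasFP,
    // StackSize, StackAdjust, and SavedRegs; what follows packs them into the
    // 32-bit encoding. For illustration, the frame-pointer case sets the mode
    // in bits 24-31, the (divided) stack adjustment in bits 16-23, and the
    // saved-register list (3 bits per register) in the bits covered by
    // UNWIND_BP_FRAME_REGISTERS.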
    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      // If the amount of the stack allocation is the size of a register, then
      // we "push" the RAX/EAX register onto the stack instead of adjusting the
      // stack pointer with a SUB instruction. We don't support the push of the
      // RAX/EAX register with compact unwind. So we check for that situation
      // here.
      if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
           StackSize - PrevStackSize == 1) ||
          (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
        return CU::UNWIND_MODE_DWARF;

      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }

private:
  /// \brief Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const uint16_t CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const uint16_t CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }
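
  // For illustration: on x86-64 the table above maps RBX -> 1, R12 -> 2,
  // R13 -> 3, R14 -> 4, R15 -> 5, RBP -> 6. Any other register returns -1,
  // which makes the encoders below give up, and the caller then falls back
  // to CU::UNWIND_MODE_DWARF.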

  /// \brief Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3 bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3 bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }

  /// \brief Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +    RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             +    RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }
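
  // For illustration: the switch above is a mixed-radix (factorial base)
  // encoding of the renumbered sequence. With RegCount == 2 the result is
  // 5 * RenumRegs[4] + RenumRegs[5], one of 6 * 5 == 30 ordered pairs of
  // distinct registers; the 6-register case uses the bases 120, 24, 6, 2,
  // and 1 the same way, and every case fits the 10-bit permutation field.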

public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU,
                      bool Is64Bit)
    : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
    PushInstrSize = 1;
  }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU)
    : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  virtual uint32_t
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
  const MachO::CPUSubTypeX86 Subtype;
public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU,
                         MachO::CPUSubTypeX86 st)
    : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU),
      Subtype(st) {
    HasReliableSymbolDifference = true;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
                                     MachO::CPU_TYPE_X86_64, Subtype);
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    // Temporary labels in the string literals sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, and so the linker does not have enough information to resolve
    // the access to the appropriate atom unless an external relocation is
    // used. For non-cstring sections, we expect the compiler to use a
    // non-temporary label for anything that could have an addend pointing
    // outside the symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
  }
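
  // For illustration: __TEXT,__cstring is created with type
  // S_CSTRING_LITERALS, so the check above forces symbols (and hence external
  // relocations) for temporary labels in string literal sections, while other
  // section types can be resolved with local relocations.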

  virtual bool isSectionAtomizable(const MCSection &Section) const {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed-size data sections are uniqued; they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MCSectionMachO::S_4BYTE_LITERALS:
    case MCSectionMachO::S_8BYTE_LITERALS:
    case MCSectionMachO::S_16BYTE_LITERALS:
    case MCSectionMachO::S_LITERAL_POINTERS:
    case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
    case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
    case MCSectionMachO::S_INTERPOSING:
      return false;
    }
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  virtual uint32_t
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
    return new DarwinX86_32AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7));

  if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
    return new WindowsX86AsmBackend(T, false, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_32AsmBackend(T, OSABI, CPU);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) {
    MachO::CPUSubTypeX86 CS =
        StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
            .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
            .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
    return new DarwinX86_64AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7), CS);
  }

  if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
    return new WindowsX86AsmBackend(T, true, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_64AsmBackend(T, OSABI, CPU);
}
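
// For illustration: with a TT of "x86_64-apple-macosx10.9", the factory above
// selects DarwinX86_64AsmBackend with compact unwind enabled (the deployment
// target is not older than 10.7), while a TT of "x86_64-pc-linux-gnu" selects
// ELFX86_64AsmBackend.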