X86AsmBackend.cpp revision 35de9946d5fc01d2fed970bdcc7966bad92bdbc4
1//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/X86BaseInfo.h" 11#include "MCTargetDesc/X86FixupKinds.h" 12#include "llvm/ADT/StringSwitch.h" 13#include "llvm/MC/MCAsmBackend.h" 14#include "llvm/MC/MCAssembler.h" 15#include "llvm/MC/MCELFObjectWriter.h" 16#include "llvm/MC/MCExpr.h" 17#include "llvm/MC/MCFixupKindInfo.h" 18#include "llvm/MC/MCMachObjectWriter.h" 19#include "llvm/MC/MCObjectWriter.h" 20#include "llvm/MC/MCSectionCOFF.h" 21#include "llvm/MC/MCSectionELF.h" 22#include "llvm/MC/MCSectionMachO.h" 23#include "llvm/Support/CommandLine.h" 24#include "llvm/Support/ELF.h" 25#include "llvm/Support/ErrorHandling.h" 26#include "llvm/Support/MachO.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29using namespace llvm; 30 31// Option to allow disabling arithmetic relaxation to workaround PR9807, which 32// is useful when running bitwise comparison experiments on Darwin. We should be 33// able to remove this once PR9807 is resolved. 34static cl::opt<bool> 35MCDisableArithRelaxation("mc-x86-disable-arith-relaxation", 36 cl::desc("Disable relaxation of arithmetic instruction for X86")); 37 38static unsigned getFixupKindLog2Size(unsigned Kind) { 39 switch (Kind) { 40 default: llvm_unreachable("invalid fixup kind!"); 41 case FK_PCRel_1: 42 case FK_SecRel_1: 43 case FK_Data_1: return 0; 44 case FK_PCRel_2: 45 case FK_SecRel_2: 46 case FK_Data_2: return 1; 47 case FK_PCRel_4: 48 case X86::reloc_riprel_4byte: 49 case X86::reloc_riprel_4byte_movq_load: 50 case X86::reloc_signed_4byte: 51 case X86::reloc_global_offset_table: 52 case FK_SecRel_4: 53 case FK_Data_4: return 2; 54 case FK_PCRel_8: 55 case FK_SecRel_8: 56 case FK_Data_8: return 3; 57 } 58} 59 60namespace { 61 62class X86ELFObjectWriter : public MCELFObjectTargetWriter { 63public: 64 X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine, 65 bool HasRelocationAddend, bool foobar) 66 : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {} 67}; 68 69class X86AsmBackend : public MCAsmBackend { 70 StringRef CPU; 71public: 72 X86AsmBackend(const Target &T, StringRef _CPU) 73 : MCAsmBackend(), CPU(_CPU) {} 74 75 unsigned getNumFixupKinds() const { 76 return X86::NumTargetFixupKinds; 77 } 78 79 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const { 80 const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = { 81 { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel }, 82 { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel}, 83 { "reloc_signed_4byte", 0, 4 * 8, 0}, 84 { "reloc_global_offset_table", 0, 4 * 8, 0} 85 }; 86 87 if (Kind < FirstTargetFixupKind) 88 return MCAsmBackend::getFixupKindInfo(Kind); 89 90 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && 91 "Invalid kind!"); 92 return Infos[Kind - FirstTargetFixupKind]; 93 } 94 95 void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, 96 uint64_t Value) const { 97 unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind()); 98 99 assert(Fixup.getOffset() + Size <= DataSize && 100 "Invalid fixup offset!"); 101 102 // Check that uppper bits are either all zeros or all ones. 103 // Specifically ignore overflow/underflow as long as the leakage is 104 // limited to the lower bits. This is to remain compatible with 105 // other assemblers. 106 assert(isIntN(Size * 8 + 1, Value) && 107 "Value does not fit in the Fixup field"); 108 109 for (unsigned i = 0; i != Size; ++i) 110 Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8)); 111 } 112 113 bool mayNeedRelaxation(const MCInst &Inst) const; 114 115 bool fixupNeedsRelaxation(const MCFixup &Fixup, 116 uint64_t Value, 117 const MCRelaxableFragment *DF, 118 const MCAsmLayout &Layout) const; 119 120 void relaxInstruction(const MCInst &Inst, MCInst &Res) const; 121 122 bool writeNopData(uint64_t Count, MCObjectWriter *OW) const; 123}; 124} // end anonymous namespace 125 126static unsigned getRelaxedOpcodeBranch(unsigned Op) { 127 switch (Op) { 128 default: 129 return Op; 130 131 case X86::JAE_1: return X86::JAE_4; 132 case X86::JA_1: return X86::JA_4; 133 case X86::JBE_1: return X86::JBE_4; 134 case X86::JB_1: return X86::JB_4; 135 case X86::JE_1: return X86::JE_4; 136 case X86::JGE_1: return X86::JGE_4; 137 case X86::JG_1: return X86::JG_4; 138 case X86::JLE_1: return X86::JLE_4; 139 case X86::JL_1: return X86::JL_4; 140 case X86::JMP_1: return X86::JMP_4; 141 case X86::JNE_1: return X86::JNE_4; 142 case X86::JNO_1: return X86::JNO_4; 143 case X86::JNP_1: return X86::JNP_4; 144 case X86::JNS_1: return X86::JNS_4; 145 case X86::JO_1: return X86::JO_4; 146 case X86::JP_1: return X86::JP_4; 147 case X86::JS_1: return X86::JS_4; 148 } 149} 150 151static unsigned getRelaxedOpcodeArith(unsigned Op) { 152 switch (Op) { 153 default: 154 return Op; 155 156 // IMUL 157 case X86::IMUL16rri8: return X86::IMUL16rri; 158 case X86::IMUL16rmi8: return X86::IMUL16rmi; 159 case X86::IMUL32rri8: return X86::IMUL32rri; 160 case X86::IMUL32rmi8: return X86::IMUL32rmi; 161 case X86::IMUL64rri8: return X86::IMUL64rri32; 162 case X86::IMUL64rmi8: return X86::IMUL64rmi32; 163 164 // AND 165 case X86::AND16ri8: return X86::AND16ri; 166 case X86::AND16mi8: return X86::AND16mi; 167 case X86::AND32ri8: return X86::AND32ri; 168 case X86::AND32mi8: return X86::AND32mi; 169 case X86::AND64ri8: return X86::AND64ri32; 170 case X86::AND64mi8: return X86::AND64mi32; 171 172 // OR 173 case X86::OR16ri8: return X86::OR16ri; 174 case X86::OR16mi8: return X86::OR16mi; 175 case X86::OR32ri8: return X86::OR32ri; 176 case X86::OR32mi8: return X86::OR32mi; 177 case X86::OR64ri8: return X86::OR64ri32; 178 case X86::OR64mi8: return X86::OR64mi32; 179 180 // XOR 181 case X86::XOR16ri8: return X86::XOR16ri; 182 case X86::XOR16mi8: return X86::XOR16mi; 183 case X86::XOR32ri8: return X86::XOR32ri; 184 case X86::XOR32mi8: return X86::XOR32mi; 185 case X86::XOR64ri8: return X86::XOR64ri32; 186 case X86::XOR64mi8: return X86::XOR64mi32; 187 188 // ADD 189 case X86::ADD16ri8: return X86::ADD16ri; 190 case X86::ADD16mi8: return X86::ADD16mi; 191 case X86::ADD32ri8: return X86::ADD32ri; 192 case X86::ADD32mi8: return X86::ADD32mi; 193 case X86::ADD64ri8: return X86::ADD64ri32; 194 case X86::ADD64mi8: return X86::ADD64mi32; 195 196 // SUB 197 case X86::SUB16ri8: return X86::SUB16ri; 198 case X86::SUB16mi8: return X86::SUB16mi; 199 case X86::SUB32ri8: return X86::SUB32ri; 200 case X86::SUB32mi8: return X86::SUB32mi; 201 case X86::SUB64ri8: return X86::SUB64ri32; 202 case X86::SUB64mi8: return X86::SUB64mi32; 203 204 // CMP 205 case X86::CMP16ri8: return X86::CMP16ri; 206 case X86::CMP16mi8: return X86::CMP16mi; 207 case X86::CMP32ri8: return X86::CMP32ri; 208 case X86::CMP32mi8: return X86::CMP32mi; 209 case X86::CMP64ri8: return X86::CMP64ri32; 210 case X86::CMP64mi8: return X86::CMP64mi32; 211 212 // PUSH 213 case X86::PUSHi8: return X86::PUSHi32; 214 case X86::PUSHi16: return X86::PUSHi32; 215 case X86::PUSH64i8: return X86::PUSH64i32; 216 case X86::PUSH64i16: return X86::PUSH64i32; 217 } 218} 219 220static unsigned getRelaxedOpcode(unsigned Op) { 221 unsigned R = getRelaxedOpcodeArith(Op); 222 if (R != Op) 223 return R; 224 return getRelaxedOpcodeBranch(Op); 225} 226 227bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const { 228 // Branches can always be relaxed. 229 if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode()) 230 return true; 231 232 if (MCDisableArithRelaxation) 233 return false; 234 235 // Check if this instruction is ever relaxable. 236 if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode()) 237 return false; 238 239 240 // Check if it has an expression and is not RIP relative. 241 bool hasExp = false; 242 bool hasRIP = false; 243 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) { 244 const MCOperand &Op = Inst.getOperand(i); 245 if (Op.isExpr()) 246 hasExp = true; 247 248 if (Op.isReg() && Op.getReg() == X86::RIP) 249 hasRIP = true; 250 } 251 252 // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on 253 // how we do relaxations? 254 return hasExp && !hasRIP; 255} 256 257bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, 258 uint64_t Value, 259 const MCRelaxableFragment *DF, 260 const MCAsmLayout &Layout) const { 261 // Relax if the value is too big for a (signed) i8. 262 return int64_t(Value) != int64_t(int8_t(Value)); 263} 264 265// FIXME: Can tblgen help at all here to verify there aren't other instructions 266// we can relax? 267void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const { 268 // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel. 269 unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode()); 270 271 if (RelaxedOp == Inst.getOpcode()) { 272 SmallString<256> Tmp; 273 raw_svector_ostream OS(Tmp); 274 Inst.dump_pretty(OS); 275 OS << "\n"; 276 report_fatal_error("unexpected instruction to relax: " + OS.str()); 277 } 278 279 Res = Inst; 280 Res.setOpcode(RelaxedOp); 281} 282 283/// \brief Write a sequence of optimal nops to the output, covering \p Count 284/// bytes. 285/// \return - true on success, false on failure 286bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { 287 static const uint8_t Nops[10][10] = { 288 // nop 289 {0x90}, 290 // xchg %ax,%ax 291 {0x66, 0x90}, 292 // nopl (%[re]ax) 293 {0x0f, 0x1f, 0x00}, 294 // nopl 0(%[re]ax) 295 {0x0f, 0x1f, 0x40, 0x00}, 296 // nopl 0(%[re]ax,%[re]ax,1) 297 {0x0f, 0x1f, 0x44, 0x00, 0x00}, 298 // nopw 0(%[re]ax,%[re]ax,1) 299 {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00}, 300 // nopl 0L(%[re]ax) 301 {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00}, 302 // nopl 0L(%[re]ax,%[re]ax,1) 303 {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, 304 // nopw 0L(%[re]ax,%[re]ax,1) 305 {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, 306 // nopw %cs:0L(%[re]ax,%[re]ax,1) 307 {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, 308 }; 309 310 // This CPU doesnt support long nops. If needed add more. 311 // FIXME: Can we get this from the subtarget somehow? 312 if (CPU == "generic" || CPU == "i386" || CPU == "i486" || CPU == "i586" || 313 CPU == "pentium" || CPU == "pentium-mmx" || CPU == "geode") { 314 for (uint64_t i = 0; i < Count; ++i) 315 OW->Write8(0x90); 316 return true; 317 } 318 319 // 15 is the longest single nop instruction. Emit as many 15-byte nops as 320 // needed, then emit a nop of the remaining length. 321 do { 322 const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15); 323 const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10; 324 for (uint8_t i = 0; i < Prefixes; i++) 325 OW->Write8(0x66); 326 const uint8_t Rest = ThisNopLength - Prefixes; 327 for (uint8_t i = 0; i < Rest; i++) 328 OW->Write8(Nops[Rest - 1][i]); 329 Count -= ThisNopLength; 330 } while (Count != 0); 331 332 return true; 333} 334 335/* *** */ 336 337namespace { 338 339class ELFX86AsmBackend : public X86AsmBackend { 340public: 341 uint8_t OSABI; 342 ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU) 343 : X86AsmBackend(T, CPU), OSABI(_OSABI) { 344 HasReliableSymbolDifference = true; 345 } 346 347 virtual bool doesSectionRequireSymbols(const MCSection &Section) const { 348 const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section); 349 return ES.getFlags() & ELF::SHF_MERGE; 350 } 351}; 352 353class ELFX86_32AsmBackend : public ELFX86AsmBackend { 354public: 355 ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) 356 : ELFX86AsmBackend(T, OSABI, CPU) {} 357 358 MCObjectWriter *createObjectWriter(raw_ostream &OS) const { 359 return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386); 360 } 361}; 362 363class ELFX86_64AsmBackend : public ELFX86AsmBackend { 364public: 365 ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) 366 : ELFX86AsmBackend(T, OSABI, CPU) {} 367 368 MCObjectWriter *createObjectWriter(raw_ostream &OS) const { 369 return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64); 370 } 371}; 372 373class WindowsX86AsmBackend : public X86AsmBackend { 374 bool Is64Bit; 375 376public: 377 WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU) 378 : X86AsmBackend(T, CPU) 379 , Is64Bit(is64Bit) { 380 } 381 382 MCObjectWriter *createObjectWriter(raw_ostream &OS) const { 383 return createX86WinCOFFObjectWriter(OS, Is64Bit); 384 } 385}; 386 387namespace CU { 388 389 /// Compact unwind encoding values. 390 enum CompactUnwindEncodings { 391 /// [RE]BP based frame where [RE]BP is pused on the stack immediately after 392 /// the return address, then [RE]SP is moved to [RE]BP. 393 UNWIND_MODE_BP_FRAME = 0x01000000, 394 395 /// A frameless function with a small constant stack size. 396 UNWIND_MODE_STACK_IMMD = 0x02000000, 397 398 /// A frameless function with a large constant stack size. 399 UNWIND_MODE_STACK_IND = 0x03000000, 400 401 /// No compact unwind encoding is available. 402 UNWIND_MODE_DWARF = 0x04000000, 403 404 /// Mask for encoding the frame registers. 405 UNWIND_BP_FRAME_REGISTERS = 0x00007FFF, 406 407 /// Mask for encoding the frameless registers. 408 UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF 409 }; 410 411} // end CU namespace 412 413class DarwinX86AsmBackend : public X86AsmBackend { 414 const MCRegisterInfo &MRI; 415 416 /// \brief Number of registers that can be saved in a compact unwind encoding. 417 enum { CU_NUM_SAVED_REGS = 6 }; 418 419 mutable unsigned SavedRegs[CU_NUM_SAVED_REGS]; 420 bool Is64Bit; 421 422 unsigned OffsetSize; ///< Offset of a "push" instruction. 423 unsigned PushInstrSize; ///< Size of a "push" instruction. 424 unsigned MoveInstrSize; ///< Size of a "move" instruction. 425 unsigned StackDivide; ///< Amount to adjust stack stize by. 426protected: 427 /// \brief Implementation of algorithm to generate the compact unwind encoding 428 /// for the CFI instructions. 429 uint32_t 430 generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const { 431 if (Instrs.empty()) return 0; 432 433 // Reset the saved registers. 434 unsigned SavedRegIdx = 0; 435 memset(SavedRegs, 0, sizeof(SavedRegs)); 436 437 bool HasFP = false; 438 439 // Encode that we are using EBP/RBP as the frame pointer. 440 uint32_t CompactUnwindEncoding = 0; 441 442 unsigned SubtractInstrIdx = Is64Bit ? 3 : 2; 443 unsigned InstrOffset = 0; 444 unsigned StackAdjust = 0; 445 unsigned StackSize = 0; 446 unsigned PrevStackSize = 0; 447 unsigned NumDefCFAOffsets = 0; 448 449 for (unsigned i = 0, e = Instrs.size(); i != e; ++i) { 450 const MCCFIInstruction &Inst = Instrs[i]; 451 452 switch (Inst.getOperation()) { 453 default: 454 // Any other CFI directives indicate a frame that we aren't prepared 455 // to represent via compact unwind, so just bail out. 456 return 0; 457 case MCCFIInstruction::OpDefCfaRegister: { 458 // Defines a frame pointer. E.g. 459 // 460 // movq %rsp, %rbp 461 // L0: 462 // .cfi_def_cfa_register %rbp 463 // 464 HasFP = true; 465 assert(MRI.getLLVMRegNum(Inst.getRegister(), true) == 466 (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!"); 467 468 // Reset the counts. 469 memset(SavedRegs, 0, sizeof(SavedRegs)); 470 StackAdjust = 0; 471 SavedRegIdx = 0; 472 InstrOffset += MoveInstrSize; 473 break; 474 } 475 case MCCFIInstruction::OpDefCfaOffset: { 476 // Defines a new offset for the CFA. E.g. 477 // 478 // With frame: 479 // 480 // pushq %rbp 481 // L0: 482 // .cfi_def_cfa_offset 16 483 // 484 // Without frame: 485 // 486 // subq $72, %rsp 487 // L0: 488 // .cfi_def_cfa_offset 80 489 // 490 PrevStackSize = StackSize; 491 StackSize = std::abs(Inst.getOffset()) / StackDivide; 492 ++NumDefCFAOffsets; 493 break; 494 } 495 case MCCFIInstruction::OpOffset: { 496 // Defines a "push" of a callee-saved register. E.g. 497 // 498 // pushq %r15 499 // pushq %r14 500 // pushq %rbx 501 // L0: 502 // subq $120, %rsp 503 // L1: 504 // .cfi_offset %rbx, -40 505 // .cfi_offset %r14, -32 506 // .cfi_offset %r15, -24 507 // 508 if (SavedRegIdx == CU_NUM_SAVED_REGS) 509 // If there are too many saved registers, we cannot use a compact 510 // unwind encoding. 511 return CU::UNWIND_MODE_DWARF; 512 513 unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true); 514 SavedRegs[SavedRegIdx++] = Reg; 515 StackAdjust += OffsetSize; 516 InstrOffset += PushInstrSize; 517 break; 518 } 519 } 520 } 521 522 StackAdjust /= StackDivide; 523 524 if (HasFP) { 525 if ((StackAdjust & 0xFF) != StackAdjust) 526 // Offset was too big for a compact unwind encoding. 527 return CU::UNWIND_MODE_DWARF; 528 529 // Get the encoding of the saved registers when we have a frame pointer. 530 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(); 531 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; 532 533 CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME; 534 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16; 535 CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS; 536 } else { 537 // If the amount of the stack allocation is the size of a register, then 538 // we "push" the RAX/EAX register onto the stack instead of adjusting the 539 // stack pointer with a SUB instruction. We don't support the push of the 540 // RAX/EAX register with compact unwind. So we check for that situation 541 // here. 542 if ((NumDefCFAOffsets == SavedRegIdx + 1 && 543 StackSize - PrevStackSize == 1) || 544 (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2)) 545 return CU::UNWIND_MODE_DWARF; 546 547 SubtractInstrIdx += InstrOffset; 548 ++StackAdjust; 549 550 if ((StackSize & 0xFF) == StackSize) { 551 // Frameless stack with a small stack size. 552 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD; 553 554 // Encode the stack size. 555 CompactUnwindEncoding |= (StackSize & 0xFF) << 16; 556 } else { 557 if ((StackAdjust & 0x7) != StackAdjust) 558 // The extra stack adjustments are too big for us to handle. 559 return CU::UNWIND_MODE_DWARF; 560 561 // Frameless stack with an offset too large for us to encode compactly. 562 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND; 563 564 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP' 565 // instruction. 566 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16; 567 568 // Encode any extra stack stack adjustments (done via push 569 // instructions). 570 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13; 571 } 572 573 // Encode the number of registers saved. (Reverse the list first.) 574 std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]); 575 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10; 576 577 // Get the encoding of the saved registers when we don't have a frame 578 // pointer. 579 uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx); 580 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; 581 582 // Encode the register encoding. 583 CompactUnwindEncoding |= 584 RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION; 585 } 586 587 return CompactUnwindEncoding; 588 } 589 590private: 591 /// \brief Get the compact unwind number for a given register. The number 592 /// corresponds to the enum lists in compact_unwind_encoding.h. 593 int getCompactUnwindRegNum(unsigned Reg) const { 594 static const uint16_t CU32BitRegs[7] = { 595 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 596 }; 597 static const uint16_t CU64BitRegs[] = { 598 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 599 }; 600 const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs; 601 for (int Idx = 1; *CURegs; ++CURegs, ++Idx) 602 if (*CURegs == Reg) 603 return Idx; 604 605 return -1; 606 } 607 608 /// \brief Return the registers encoded for a compact encoding with a frame 609 /// pointer. 610 uint32_t encodeCompactUnwindRegistersWithFrame() const { 611 // Encode the registers in the order they were saved --- 3-bits per 612 // register. The list of saved registers is assumed to be in reverse 613 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS. 614 uint32_t RegEnc = 0; 615 for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) { 616 unsigned Reg = SavedRegs[i]; 617 if (Reg == 0) break; 618 619 int CURegNum = getCompactUnwindRegNum(Reg); 620 if (CURegNum == -1) return ~0U; 621 622 // Encode the 3-bit register number in order, skipping over 3-bits for 623 // each register. 624 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3); 625 } 626 627 assert((RegEnc & 0x3FFFF) == RegEnc && 628 "Invalid compact register encoding!"); 629 return RegEnc; 630 } 631 632 /// \brief Create the permutation encoding used with frameless stacks. It is 633 /// passed the number of registers to be saved and an array of the registers 634 /// saved. 635 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const { 636 // The saved registers are numbered from 1 to 6. In order to encode the 637 // order in which they were saved, we re-number them according to their 638 // place in the register order. The re-numbering is relative to the last 639 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in 640 // that order: 641 // 642 // Orig Re-Num 643 // ---- ------ 644 // 6 6 645 // 2 2 646 // 4 3 647 // 5 3 648 // 649 for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) { 650 int CUReg = getCompactUnwindRegNum(SavedRegs[i]); 651 if (CUReg == -1) return ~0U; 652 SavedRegs[i] = CUReg; 653 } 654 655 // Reverse the list. 656 std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]); 657 658 uint32_t RenumRegs[CU_NUM_SAVED_REGS]; 659 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){ 660 unsigned Countless = 0; 661 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j) 662 if (SavedRegs[j] < SavedRegs[i]) 663 ++Countless; 664 665 RenumRegs[i] = SavedRegs[i] - Countless - 1; 666 } 667 668 // Take the renumbered values and encode them into a 10-bit number. 669 uint32_t permutationEncoding = 0; 670 switch (RegCount) { 671 case 6: 672 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1] 673 + 6 * RenumRegs[2] + 2 * RenumRegs[3] 674 + RenumRegs[4]; 675 break; 676 case 5: 677 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2] 678 + 6 * RenumRegs[3] + 2 * RenumRegs[4] 679 + RenumRegs[5]; 680 break; 681 case 4: 682 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3] 683 + 3 * RenumRegs[4] + RenumRegs[5]; 684 break; 685 case 3: 686 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4] 687 + RenumRegs[5]; 688 break; 689 case 2: 690 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5]; 691 break; 692 case 1: 693 permutationEncoding |= RenumRegs[5]; 694 break; 695 } 696 697 assert((permutationEncoding & 0x3FF) == permutationEncoding && 698 "Invalid compact register encoding!"); 699 return permutationEncoding; 700 } 701 702public: 703 DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU, 704 bool Is64Bit) 705 : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) { 706 memset(SavedRegs, 0, sizeof(SavedRegs)); 707 OffsetSize = Is64Bit ? 8 : 4; 708 MoveInstrSize = Is64Bit ? 3 : 2; 709 StackDivide = Is64Bit ? 8 : 4; 710 PushInstrSize = 1; 711 } 712}; 713 714class DarwinX86_32AsmBackend : public DarwinX86AsmBackend { 715 bool SupportsCU; 716public: 717 DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI, 718 StringRef CPU, bool SupportsCU) 719 : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {} 720 721 MCObjectWriter *createObjectWriter(raw_ostream &OS) const { 722 return createX86MachObjectWriter(OS, /*Is64Bit=*/false, 723 MachO::CPU_TYPE_I386, 724 MachO::CPU_SUBTYPE_I386_ALL); 725 } 726 727 /// \brief Generate the compact unwind encoding for the CFI instructions. 728 virtual uint32_t 729 generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const { 730 return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0; 731 } 732}; 733 734class DarwinX86_64AsmBackend : public DarwinX86AsmBackend { 735 bool SupportsCU; 736 const MachO::CPUSubTypeX86 Subtype; 737public: 738 DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI, 739 StringRef CPU, bool SupportsCU, 740 MachO::CPUSubTypeX86 st) 741 : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU), 742 Subtype(st) { 743 HasReliableSymbolDifference = true; 744 } 745 746 MCObjectWriter *createObjectWriter(raw_ostream &OS) const { 747 return createX86MachObjectWriter(OS, /*Is64Bit=*/true, 748 MachO::CPU_TYPE_X86_64, Subtype); 749 } 750 751 virtual bool doesSectionRequireSymbols(const MCSection &Section) const { 752 // Temporary labels in the string literals sections require symbols. The 753 // issue is that the x86_64 relocation format does not allow symbol + 754 // offset, and so the linker does not have enough information to resolve the 755 // access to the appropriate atom unless an external relocation is used. For 756 // non-cstring sections, we expect the compiler to use a non-temporary label 757 // for anything that could have an addend pointing outside the symbol. 758 // 759 // See <rdar://problem/4765733>. 760 const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section); 761 return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS; 762 } 763 764 virtual bool isSectionAtomizable(const MCSection &Section) const { 765 const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section); 766 // Fixed sized data sections are uniqued, they cannot be diced into atoms. 767 switch (SMO.getType()) { 768 default: 769 return true; 770 771 case MCSectionMachO::S_4BYTE_LITERALS: 772 case MCSectionMachO::S_8BYTE_LITERALS: 773 case MCSectionMachO::S_16BYTE_LITERALS: 774 case MCSectionMachO::S_LITERAL_POINTERS: 775 case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS: 776 case MCSectionMachO::S_LAZY_SYMBOL_POINTERS: 777 case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS: 778 case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS: 779 case MCSectionMachO::S_INTERPOSING: 780 return false; 781 } 782 } 783 784 /// \brief Generate the compact unwind encoding for the CFI instructions. 785 virtual uint32_t 786 generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const { 787 return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0; 788 } 789}; 790 791} // end anonymous namespace 792 793MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, 794 const MCRegisterInfo &MRI, 795 StringRef TT, 796 StringRef CPU) { 797 Triple TheTriple(TT); 798 799 if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) 800 return new DarwinX86_32AsmBackend(T, MRI, CPU, 801 TheTriple.isMacOSX() && 802 !TheTriple.isMacOSXVersionLT(10, 7)); 803 804 if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF) 805 return new WindowsX86AsmBackend(T, false, CPU); 806 807 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); 808 return new ELFX86_32AsmBackend(T, OSABI, CPU); 809} 810 811MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, 812 const MCRegisterInfo &MRI, 813 StringRef TT, 814 StringRef CPU) { 815 Triple TheTriple(TT); 816 817 if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) { 818 MachO::CPUSubTypeX86 CS = 819 StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName()) 820 .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H) 821 .Default(MachO::CPU_SUBTYPE_X86_64_ALL); 822 return new DarwinX86_64AsmBackend(T, MRI, CPU, 823 TheTriple.isMacOSX() && 824 !TheTriple.isMacOSXVersionLT(10, 7), CS); 825 } 826 827 if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF) 828 return new WindowsX86AsmBackend(T, true, CPU); 829 830 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); 831 return new ELFX86_64AsmBackend(T, OSABI, CPU); 832} 833