X86AsmBackend.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Option to allow disabling arithmetic relaxation to workaround PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should be
// able to remove this once PR9807 is resolved.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instruction for X86"));

static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default: llvm_unreachable("invalid fixup kind!");
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1: return 0;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2: return 1;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_SecRel_4:
  case FK_Data_4: return 2;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8: return 3;
  }
}

namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
  StringRef CPU;
  bool HasNopl;
public:
  X86AsmBackend(const Target &T, StringRef _CPU)
    : MCAsmBackend(), CPU(_CPU) {
    HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
              CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
              CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
              CPU != "geode" && CPU != "winchip-c6" && CPU != "winchip2" &&
              CPU != "c3" && CPU != "c3-2";
  }

  unsigned getNumFixupKinds() const override {
    return X86::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
      { "reloc_signed_4byte", 0, 4 * 8, 0},
      { "reloc_global_offset_table", 0, 4 * 8, 0}
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }
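
  // Write the resolved fixup value into the fragment's data, least-significant
  // byte first (x86 is little-endian), starting at the fixup's offset.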
  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that the upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");

    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }

  bool mayNeedRelaxation(const MCInst &Inst) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
} // end anonymous namespace

static unsigned getRelaxedOpcodeBranch(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1: return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1: return X86::JB_4;
  case X86::JE_1: return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1: return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1: return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1: return X86::JO_4;
  case X86::JP_1: return X86::JP_4;
  case X86::JS_1: return X86::JS_4;
  }
}
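
// Map an arithmetic, logic, compare, or push opcode that takes a sign-extended
// 8-bit immediate to the equivalent opcode with a full-width immediate. This is
// the arithmetic counterpart of the branch table above: when a fixup no longer
// fits in an imm8, the instruction is rewritten into its imm16/imm32 form.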
static unsigned getRelaxedOpcodeArith(unsigned Op) {
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSH32i8: return X86::PUSHi32;
  case X86::PUSH16i8: return X86::PUSHi16;
  case X86::PUSH64i8: return X86::PUSH64i32;
  case X86::PUSH64i16: return X86::PUSH64i32;
  }
}

static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}

bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  if (MCDisableArithRelaxation)
    return false;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if it has an expression and is not RIP relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

/// \brief Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };
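
  // Lengths 11 through 15 have no table entry; the emission loop below builds
  // them by prepending 0x66 operand-size prefixes to the 10-byte nop.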
  // This CPU doesn't support long nops. If needed add more.
  // FIXME: Can we get this from the subtarget somehow?
  // FIXME: We could generate something better than plain 0x90.
  if (!HasNopl) {
    for (uint64_t i = 0; i < Count; ++i)
      OW->Write8(0x90);
    return true;
  }

  // 15 is the longest single nop instruction. Emit as many 15-byte nops as
  // needed, then emit a nop of the remaining length.
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OW->Write8(0x66);
    const uint8_t Rest = ThisNopLength - Prefixes;
    for (uint8_t i = 0; i < Rest; i++)
      OW->Write8(Nops[Rest - 1][i]);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}

/* *** */

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
    : X86AsmBackend(T, CPU), OSABI(_OSABI) {}
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU)
    : X86AsmBackend(T, CPU)
    , Is64Bit(is64Bit) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86WinCOFFObjectWriter(OS, Is64Bit);
  }
};
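
// Darwin "compact unwind" packs the information needed to unwind a frame into
// a single 32-bit word. The layout produced by the encoder below is:
//
//   Bits 24-27: the unwind mode (BP-based frame, frameless with an immediate
//               or indirect stack size, or "fall back to DWARF").
//   BP frame:   bits 16-23 hold the stack adjustment and bits 0-14 the saved
//               registers, 3 bits per register.
//   Frameless:  bits 16-23 hold the stack size (immediate mode) or the offset
//               of the allocation instruction's immediate (indirect mode),
//               bits 13-15 any extra adjustment, bits 10-12 the number of
//               saved registers, and bits 0-9 a permutation encoding of which
//               registers were saved.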
namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
    /// the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // end CU namespace

class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;                   ///< Offset of a "push" instruction.
  unsigned PushInstrSize;                ///< Size of a "push" instruction.
  unsigned MoveInstrSize;                ///< Size of a "move" instruction.
  unsigned StackDivide;                  ///< Amount to adjust stack size by.
protected:
  /// \brief Implementation of algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned PrevStackSize = 0;
    unsigned NumDefCFAOffsets = 0;

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return 0;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;
        assert(MRI.getLLVMRegNum(Inst.getRegister(), true) ==
               (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!");

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        PrevStackSize = StackSize;
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize;
        break;
      }
      }
    }
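
    // The prologue CFI has been scanned; fold the gathered state into a
    // frame-pointer or frameless encoding, or fall back to DWARF.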
    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      // If the amount of the stack allocation is the size of a register, then
      // we "push" the RAX/EAX register onto the stack instead of adjusting the
      // stack pointer with a SUB instruction. We don't support the push of the
      // RAX/EAX register with compact unwind. So we check for that situation
      // here.
      if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
           StackSize - PrevStackSize == 1) ||
          (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
        return CU::UNWIND_MODE_DWARF;

      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }

private:
  /// \brief Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const uint16_t CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const uint16_t CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }

  /// \brief Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3-bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }
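
  // The frameless encoding does not store each register number directly;
  // instead the sequence of saved registers is reduced to a single permutation
  // index (essentially a Lehmer code), which is what the factorial-style
  // 120/24/6/2 multipliers below compute.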
  /// \brief Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |=  60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 3:
      permutationEncoding |=  20 * RenumRegs[3] +  4 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 2:
      permutationEncoding |=   5 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 1:
      permutationEncoding |=       RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }

public:
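  // The constructor below sets the per-target sizes used by the prologue
  // accounting above: a push occupies one pointer-sized stack slot, the mov
  // that sets up the frame pointer is 3 bytes on x86-64 (REX prefix) and
  // 2 bytes on i386, a push opcode is counted as a single byte, and stack
  // sizes are scaled down to pointer-sized units.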
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU,
                      bool Is64Bit)
    : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
    PushInstrSize = 1;
  }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU)
    : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
  const MachO::CPUSubTypeX86 Subtype;
public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU,
                         MachO::CPUSubTypeX86 st)
    : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU),
      Subtype(st) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
                                     MachO::CPU_TYPE_X86_64, Subtype);
  }

  bool doesSectionRequireSymbols(const MCSection &Section) const override {
    // Temporary labels in the string literals sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, and so the linker does not have enough information to resolve the
    // access to the appropriate atom unless an external relocation is used. For
    // non-cstring sections, we expect the compiler to use a non-temporary label
    // for anything that could have an addend pointing outside the symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MachO::S_CSTRING_LITERALS;
  }

  bool isSectionAtomizable(const MCSection &Section) const override {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed sized data sections are uniqued; they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MachO::S_4BYTE_LITERALS:
    case MachO::S_8BYTE_LITERALS:
    case MachO::S_16BYTE_LITERALS:
    case MachO::S_LITERAL_POINTERS:
    case MachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MachO::S_LAZY_SYMBOL_POINTERS:
    case MachO::S_MOD_INIT_FUNC_POINTERS:
    case MachO::S_MOD_TERM_FUNC_POINTERS:
    case MachO::S_INTERPOSING:
      return false;
    }
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86_32AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7));

  if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
    return new WindowsX86AsmBackend(T, false, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_32AsmBackend(T, OSABI, CPU);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSBinFormatMachO()) {
    MachO::CPUSubTypeX86 CS =
        StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
            .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
            .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
    return new DarwinX86_64AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7), CS);
  }

  if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
    return new WindowsX86AsmBackend(T, true, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_64AsmBackend(T, OSABI, CPU);
}