// PPCJITInfo.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
//===-- PPCJITInfo.cpp - Implement the JIT interfaces for the PowerPC -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JIT interfaces for the 32-bit PowerPC target.
//
//===----------------------------------------------------------------------===//

#include "PPCJITInfo.h"
#include "PPCRelocations.h"
#include "PPCTargetMachine.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "jit"

// Installed by getLazyResolverFunction(); invoked from the assembly
// compilation callbacks (via LLVMPPCCompilationCallback) to JIT-compile the
// function a lazy stub stands in for.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;

// Helpers that assemble raw 32-bit PowerPC instruction words.  Register
// numbers and immediate operands are masked down to their encoding field
// widths.  The high 6 bits of every word are the primary opcode.
#define BUILD_ADDIS(RD,RS,IMM16) \
  ((15 << 26) | ((RD) << 21) | ((RS) << 16) | ((IMM16) & 65535))
#define BUILD_ORI(RD,RS,UIMM16) \
  ((24 << 26) | ((RS) << 21) | ((RD) << 16) | ((UIMM16) & 65535))
#define BUILD_ORIS(RD,RS,UIMM16) \
  ((25 << 26) | ((RS) << 21) | ((RD) << 16) | ((UIMM16) & 65535))
#define BUILD_RLDICR(RD,RS,SH,ME) \
  ((30 << 26) | ((RS) << 21) | ((RD) << 16) | (((SH) & 31) << 11) | \
   (((ME) & 63) << 6) | (1 << 2) | ((((SH) >> 5) & 1) << 1))
#define BUILD_MTSPR(RS,SPR) \
  ((31 << 26) | ((RS) << 21) | ((SPR) << 16) | (467 << 1))
#define BUILD_BCCTRx(BO,BI,LINK) \
  ((19 << 26) | ((BO) << 21) | ((BI) << 16) | (528 << 1) | ((LINK) & 1))
#define BUILD_B(TARGET, LINK) \
  ((18 << 26) | (((TARGET) & 0x00FFFFFF) << 2) | ((LINK) & 1))

// Pseudo-ops expressed in terms of the raw encodings above.
#define BUILD_LIS(RD,IMM16)    BUILD_ADDIS(RD,0,IMM16)
#define BUILD_SLDI(RD,RS,IMM6) BUILD_RLDICR(RD,RS,IMM6,63-IMM6)
#define BUILD_MTCTR(RS)        BUILD_MTSPR(RS,9)
#define BUILD_BCTR(LINK)       BUILD_BCCTRx(20,0,LINK)

/// Overwrite the code at address At with an unconditional branch (or call,
/// when isCall) to address To.  When the signed 24-bit word displacement is
/// in range a single b/bl is emitted; otherwise the target address is
/// materialized into r12 and reached through the count register — a 4-word
/// sequence on 32-bit and a 7-word sequence on 64-bit.  The caller is
/// responsible for invalidating the instruction cache over the bytes written.
static void EmitBranchToAt(uint64_t At, uint64_t To, bool isCall, bool is64Bit){
  // Word (instruction) displacement from At to To.
  intptr_t Offset = ((intptr_t)To - (intptr_t)At) >> 2;
  unsigned *AtI = (unsigned*)(intptr_t)At;

  if (Offset >= -(1 << 23) && Offset < (1 << 23)) {   // In range?
    AtI[0] = BUILD_B(Offset, isCall);     // b/bl target
  } else if (!is64Bit) {
    AtI[0] = BUILD_LIS(12, To >> 16);     // lis r12, hi16(address)
    AtI[1] = BUILD_ORI(12, 12, To);       // ori r12, r12, lo16(address)
    AtI[2] = BUILD_MTCTR(12);             // mtctr r12
    AtI[3] = BUILD_BCTR(isCall);          // bctr/bctrl
  } else {
    AtI[0] = BUILD_LIS(12, To >> 48);      // lis r12, hi16(address)
    AtI[1] = BUILD_ORI(12, 12, To >> 32);  // ori r12, r12, lo16(address)
    AtI[2] = BUILD_SLDI(12, 12, 32);       // sldi r12, r12, 32
    AtI[3] = BUILD_ORIS(12, 12, To >> 16); // oris r12, r12, hi16(address)
    AtI[4] = BUILD_ORI(12, 12, To);        // ori r12, r12, lo16(address)
    AtI[5] = BUILD_MTCTR(12);              // mtctr r12
    AtI[6] = BUILD_BCTR(isCall);           // bctr/bctrl
  }
}

extern "C" void PPC32CompilationCallback();
extern "C" void PPC64CompilationCallback();

// The first clause of the preprocessor directive looks wrong, but it is
// necessary when compiling this code on non-PowerPC hosts.
#if (!defined(__ppc__) && !defined(__powerpc__)) || defined(__powerpc64__) || defined(__ppc64__)
void PPC32CompilationCallback() {
  llvm_unreachable("This is not a 32bit PowerPC, you can't execute this!");
}
#elif !defined(__ELF__)
// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because the prolog/epilog inserted by GCC won't work for us. Instead,
// write our own wrapper, which does things our way, so we have complete control
// over register saving and restoring.
asm(
    ".text\n"
    ".align 2\n"
    ".globl _PPC32CompilationCallback\n"
"_PPC32CompilationCallback:\n"
    // Make space for 8 ints r[3-10] and 13 doubles f[1-13].
    // FIXME: need to save v[0-19] for altivec?
    // FIXME: could shrink frame
    // Set up a proper stack frame
    // FIXME Layout
    //   PowerPC32 ABI linkage    -  24 bytes
    //   parameters               -  32 bytes
    //   13 double registers      - 104 bytes
    //   8 int registers          -  32 bytes
    "mflr r0\n"
    "stw r0,  8(r1)\n"
    "stwu r1, -208(r1)\n"
    // Save all int arg registers
    "stw r10, 204(r1)\n"    "stw r9,  200(r1)\n"
    "stw r8,  196(r1)\n"    "stw r7,  192(r1)\n"
    "stw r6,  188(r1)\n"    "stw r5,  184(r1)\n"
    "stw r4,  180(r1)\n"    "stw r3,  176(r1)\n"
    // Save all call-clobbered FP regs.
    "stfd f13, 168(r1)\n"   "stfd f12, 160(r1)\n"
    "stfd f11, 152(r1)\n"   "stfd f10, 144(r1)\n"
    "stfd f9,  136(r1)\n"   "stfd f8,  128(r1)\n"
    "stfd f7,  120(r1)\n"   "stfd f6,  112(r1)\n"
    "stfd f5,  104(r1)\n"   "stfd f4,   96(r1)\n"
    "stfd f3,   88(r1)\n"   "stfd f2,   80(r1)\n"
    "stfd f1,   72(r1)\n"
    // Arguments to Compilation Callback:
    // r3 - our lr (address of the call instruction in stub plus 4)
    // r4 - stub's lr (address of instruction that called the stub plus 4)
    // r5 - is64Bit - always 0.
    "mr   r3, r0\n"
    "lwz  r2, 208(r1)\n" // stub's frame
    "lwz  r4, 8(r2)\n"   // stub's lr
    "li   r5, 0\n"       // 0 == 32 bit
    "bl _LLVMPPCCompilationCallback\n"
    "mtctr r3\n"
    // Restore all int arg registers
    "lwz r10, 204(r1)\n"    "lwz r9,  200(r1)\n"
    "lwz r8,  196(r1)\n"    "lwz r7,  192(r1)\n"
    "lwz r6,  188(r1)\n"    "lwz r5,  184(r1)\n"
    "lwz r4,  180(r1)\n"    "lwz r3,  176(r1)\n"
    // Restore all FP arg registers
    "lfd f13, 168(r1)\n"    "lfd f12, 160(r1)\n"
    "lfd f11, 152(r1)\n"    "lfd f10, 144(r1)\n"
    "lfd f9,  136(r1)\n"    "lfd f8,  128(r1)\n"
    "lfd f7,  120(r1)\n"    "lfd f6,  112(r1)\n"
    "lfd f5,  104(r1)\n"    "lfd f4,   96(r1)\n"
    "lfd f3,   88(r1)\n"    "lfd f2,   80(r1)\n"
    "lfd f1,   72(r1)\n"
    // Pop 3 frames off the stack and branch to target
    "lwz  r1, 208(r1)\n"
    "lwz  r2, 8(r1)\n"
    "mtlr r2\n"
    "bctr\n"
    );

#else
// ELF PPC 32 support

// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because the prolog/epilog inserted by GCC won't work for us. Instead,
// write our own wrapper, which does things our way, so we have complete control
// over register saving and restoring.
asm(
    ".text\n"
    ".align 2\n"
    ".globl PPC32CompilationCallback\n"
"PPC32CompilationCallback:\n"
    // Make space for 8 ints r[3-10] and 8 doubles f[1-8].
    // FIXME: need to save v[0-19] for altivec?
    // FIXME: could shrink frame
    // Set up a proper stack frame
    // FIXME Layout
    //   8 double registers       -  64 bytes
    //   8 int registers          -  32 bytes
    "mflr 0\n"
    "stw 0,  4(1)\n"
    "stwu 1, -104(1)\n"
    // Save all int arg registers
    "stw 10, 100(1)\n"   "stw 9,  96(1)\n"
    "stw 8,  92(1)\n"    "stw 7,  88(1)\n"
    "stw 6,  84(1)\n"    "stw 5,  80(1)\n"
    "stw 4,  76(1)\n"    "stw 3,  72(1)\n"
    // Save all call-clobbered FP regs.
    "stfd 8,  64(1)\n"
    "stfd 7,  56(1)\n"   "stfd 6,  48(1)\n"
    "stfd 5,  40(1)\n"   "stfd 4,  32(1)\n"
    "stfd 3,  24(1)\n"   "stfd 2,  16(1)\n"
    "stfd 1,  8(1)\n"
    // Arguments to Compilation Callback:
    // r3 - our lr (address of the call instruction in stub plus 4)
    // r4 - stub's lr (address of instruction that called the stub plus 4)
    // r5 - is64Bit - always 0.
    "mr   3, 0\n"
    "lwz  5, 104(1)\n" // stub's frame
    "lwz  4, 4(5)\n"   // stub's lr
    "li   5, 0\n"      // 0 == 32 bit
    "bl LLVMPPCCompilationCallback\n"
    "mtctr 3\n"
    // Restore all int arg registers
    "lwz 10, 100(1)\n"   "lwz 9,  96(1)\n"
    "lwz 8,  92(1)\n"    "lwz 7,  88(1)\n"
    "lwz 6,  84(1)\n"    "lwz 5,  80(1)\n"
    "lwz 4,  76(1)\n"    "lwz 3,  72(1)\n"
    // Restore all FP arg registers
    "lfd 8,  64(1)\n"
    "lfd 7,  56(1)\n"    "lfd 6,  48(1)\n"
    "lfd 5,  40(1)\n"    "lfd 4,  32(1)\n"
    "lfd 3,  24(1)\n"    "lfd 2,  16(1)\n"
    "lfd 1,  8(1)\n"
    // Pop 3 frames off the stack and branch to target
    "lwz 1, 104(1)\n"
    "lwz 0, 4(1)\n"
    "mtlr 0\n"
    "bctr\n"
    );
#endif

#if !defined(__powerpc64__) && !defined(__ppc64__)
void PPC64CompilationCallback() {
  llvm_unreachable("This is not a 64bit PowerPC, you can't execute this!");
}
#else
#  ifdef __ELF__
asm(
    ".text\n"
    ".align 2\n"
    ".globl PPC64CompilationCallback\n"
#if _CALL_ELF == 2
    // ELFv2 ABI: no function descriptors; the symbol is the entry point.
    ".type PPC64CompilationCallback,@function\n"
"PPC64CompilationCallback:\n"
#else
    // ELFv1 ABI: emit an .opd function descriptor pointing at the local
    // entry label.
    ".section \".opd\",\"aw\",@progbits\n"
    ".align 3\n"
"PPC64CompilationCallback:\n"
    ".quad .L.PPC64CompilationCallback,.TOC.@tocbase,0\n"
    ".size PPC64CompilationCallback,24\n"
    ".previous\n"
    ".align 4\n"
    ".type PPC64CompilationCallback,@function\n"
".L.PPC64CompilationCallback:\n"
#endif
#  else
asm(
    ".text\n"
    ".align 2\n"
    ".globl _PPC64CompilationCallback\n"
"_PPC64CompilationCallback:\n"
#  endif
    // Make space for 8 ints r[3-10] and 13 doubles f[1-13].
    // FIXME: need to save v[0-19] for altivec?
    // Set up a proper stack frame
    // Layout
    //   PowerPC64 ABI linkage    -  48 bytes
    //   parameters               -  64 bytes
    //   13 double registers      - 104 bytes
    //   8 int registers          -  64 bytes
    "mflr 0\n"
    "std 0,  16(1)\n"
    "stdu 1, -280(1)\n"
    // Save all int arg registers
    "std 10, 272(1)\n"    "std 9,  264(1)\n"
    "std 8,  256(1)\n"    "std 7,  248(1)\n"
    "std 6,  240(1)\n"    "std 5,  232(1)\n"
    "std 4,  224(1)\n"    "std 3,  216(1)\n"
    // Save all call-clobbered FP regs.
    "stfd 13, 208(1)\n"    "stfd 12, 200(1)\n"
    "stfd 11, 192(1)\n"    "stfd 10, 184(1)\n"
    "stfd 9,  176(1)\n"    "stfd 8,  168(1)\n"
    "stfd 7,  160(1)\n"    "stfd 6,  152(1)\n"
    "stfd 5,  144(1)\n"    "stfd 4,  136(1)\n"
    "stfd 3,  128(1)\n"    "stfd 2,  120(1)\n"
    "stfd 1,  112(1)\n"
    // Arguments to Compilation Callback:
    // r3 - our lr (address of the call instruction in stub plus 4)
    // r4 - stub's lr (address of instruction that called the stub plus 4)
    // r5 - is64Bit - always 1.
    "mr   3, 0\n"      // return address (still in r0)
    "ld   5, 280(1)\n" // stub's frame
    "ld   4, 16(5)\n"  // stub's lr
    "li   5, 1\n"      // 1 == 64 bit
#  ifdef __ELF__
    "bl LLVMPPCCompilationCallback\n"
    "nop\n"
#  else
    "bl _LLVMPPCCompilationCallback\n"
#  endif
    "mtctr 3\n"
    // Restore all int arg registers
    "ld 10, 272(1)\n"    "ld 9,  264(1)\n"
    "ld 8,  256(1)\n"    "ld 7,  248(1)\n"
    "ld 6,  240(1)\n"    "ld 5,  232(1)\n"
    "ld 4,  224(1)\n"    "ld 3,  216(1)\n"
    // Restore all FP arg registers
    "lfd 13, 208(1)\n"    "lfd 12, 200(1)\n"
    "lfd 11, 192(1)\n"    "lfd 10, 184(1)\n"
    "lfd 9,  176(1)\n"    "lfd 8,  168(1)\n"
    "lfd 7,  160(1)\n"    "lfd 6,  152(1)\n"
    "lfd 5,  144(1)\n"    "lfd 4,  136(1)\n"
    "lfd 3,  128(1)\n"    "lfd 2,  120(1)\n"
    "lfd 1,  112(1)\n"
    // Pop 3 frames off the stack and branch to target
    "ld 1, 280(1)\n"
    "ld 0, 16(1)\n"
    "mtlr 0\n"
    // XXX: any special TOC handling in the ELF case for JIT?
    "bctr\n"
    );
#endif

extern "C" {
// Called from the assembly compilation callbacks above.  StubCallAddrPlus4 is
// the return address inside the stub (one instruction past the stub's bl);
// OrigCallAddrPlus4 is the return address of the call that entered the stub.
// JIT-compiles the target function, patches the original call site to reach
// it directly when in branch range, rewrites the stub itself into a direct
// branch, and returns the target's address for the callback to jump to.
LLVM_LIBRARY_VISIBILITY void *
LLVMPPCCompilationCallback(unsigned *StubCallAddrPlus4,
                           unsigned *OrigCallAddrPlus4,
                           bool is64Bit) {
  // Adjust the pointer to the address of the call instruction in the stub
  // emitted by emitFunctionStub, rather than the instruction after it.
  unsigned *StubCallAddr = StubCallAddrPlus4 - 1;
  unsigned *OrigCallAddr = OrigCallAddrPlus4 - 1;

  void *Target = JITCompilerFunction(StubCallAddr);

  // Check to see if *OrigCallAddr is a 'bl' instruction, and if we can rewrite
  // it to branch directly to the destination.  If so, rewrite it so it does not
  // need to go through the stub anymore.
  unsigned OrigCallInst = *OrigCallAddr;
  if ((OrigCallInst >> 26) == 18) {     // Direct call.
    intptr_t Offset = ((intptr_t)Target - (intptr_t)OrigCallAddr) >> 2;

    if (Offset >= -(1 << 23) && Offset < (1 << 23)) {   // In range?
      // Clear the original target out.
      OrigCallInst &= (63 << 26) | 3;
      // Fill in the new target.
      OrigCallInst |= (Offset & ((1 << 24)-1)) << 2;
      // Replace the call.
      *OrigCallAddr = OrigCallInst;
    }
  }

  // Assert that we are coming from a stub that was created with our
  // emitFunctionStub.  Back up to the start of the stub: 3 instructions for a
  // direct-call stub, 6 (32-bit) or 9 (64-bit) for an indirect one.
  if ((*StubCallAddr >> 26) == 18)
    StubCallAddr -= 3;
  else {
  assert((*StubCallAddr >> 26) == 19 && "Call in stub is not indirect!");
    StubCallAddr -= is64Bit ? 9 : 6;
  }

  // Rewrite the stub with an unconditional branch to the target, for any users
  // who took the address of the stub.
  EmitBranchToAt((intptr_t)StubCallAddr, (intptr_t)Target, false, is64Bit);
  sys::Memory::InvalidateInstructionCache(StubCallAddr, 7*4);

  // Put the address of the target function to call and the address to return to
  // after calling the target function in a place that is easy to get on the
  // stack after we restore all regs.
  return Target;
}
}



// Record the JIT compiler entry point and hand back the architecture-specific
// assembly callback that lazy stubs will branch to.
TargetJITInfo::LazyResolverFn
PPCJITInfo::getLazyResolverFunction(JITCompilerFn Fn) {
  JITCompilerFunction = Fn;
  return is64Bit ? PPC64CompilationCallback : PPC32CompilationCallback;
}

TargetJITInfo::StubLayout PPCJITInfo::getStubLayout() {
  // The stub contains up to 10 4-byte instructions, aligned at 4 bytes: 3
  // instructions to save the caller's address if this is a lazy-compilation
  // stub, plus a 1-, 4-, or 7-instruction sequence to load an arbitrary address
  // into a register and jump through it.
  StubLayout Result = {10*4, 4};
  return Result;
}

#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
defined(__APPLE__)
// Darwin's system routine for instruction-cache invalidation.
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#endif

// Emit a stub for function F that transfers control to Fn.  For ordinary
// external targets the stub is just a far branch; for the lazy-compilation
// callbacks it first spills the link register per the active ABI so the
// callback can identify and later rewrite the stub.
void *PPCJITInfo::emitFunctionStub(const Function* F, void *Fn,
                                   JITCodeEmitter &JCE) {
  // If this is just a call to an external function, emit a branch instead of a
  // call.  The code is the same except for one bit of the last instruction.
  if (Fn != (void*)(intptr_t)PPC32CompilationCallback &&
      Fn != (void*)(intptr_t)PPC64CompilationCallback) {
    void *Addr = (void*)JCE.getCurrentPCValue();
    // Reserve the maximum 7 words; EmitBranchToAt fills in what it needs.
    JCE.emitWordBE(0);
    JCE.emitWordBE(0);
    JCE.emitWordBE(0);
    JCE.emitWordBE(0);
    JCE.emitWordBE(0);
    JCE.emitWordBE(0);
    JCE.emitWordBE(0);
    EmitBranchToAt((intptr_t)Addr, (intptr_t)Fn, false, is64Bit);
    sys::Memory::InvalidateInstructionCache(Addr, 7*4);
    return Addr;
  }

  void *Addr = (void*)JCE.getCurrentPCValue();
  // Prologue: push a small frame and spill LR where the compilation callback
  // expects to find it (offsets differ per ABI).
  if (is64Bit) {
    JCE.emitWordBE(0xf821ffb1);     // stdu r1,-80(r1)
    JCE.emitWordBE(0x7d6802a6);     // mflr r11
    JCE.emitWordBE(0xf9610060);     // std r11, 96(r1)
  } else if (TM.getSubtargetImpl()->isDarwinABI()){
    JCE.emitWordBE(0x9421ffe0);     // stwu r1,-32(r1)
    JCE.emitWordBE(0x7d6802a6);     // mflr r11
    JCE.emitWordBE(0x91610028);     // stw r11, 40(r1)
  } else {
    JCE.emitWordBE(0x9421ffe0);     // stwu r1,-32(r1)
    JCE.emitWordBE(0x7d6802a6);     // mflr r11
    JCE.emitWordBE(0x91610024);     // stw r11, 36(r1)
  }
  intptr_t BranchAddr = (intptr_t)JCE.getCurrentPCValue();
  JCE.emitWordBE(0);
  JCE.emitWordBE(0);
  JCE.emitWordBE(0);
  JCE.emitWordBE(0);
  JCE.emitWordBE(0);
  JCE.emitWordBE(0);
  JCE.emitWordBE(0);
  // Call (bl/bctrl) the compilation callback so it gets our return address.
  EmitBranchToAt(BranchAddr, (intptr_t)Fn, true, is64Bit);
  sys::Memory::InvalidateInstructionCache(Addr, 10*4);
  return Addr;
}


// Apply the given machine relocations to the code of Function, patching
// instruction immediate fields in place.
void PPCJITInfo::relocate(void *Function, MachineRelocation *MR,
                          unsigned NumRelocs, unsigned char* GOTBase) {
  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
    unsigned *RelocPos = (unsigned*)Function + MR->getMachineCodeOffset()/4;
    intptr_t ResultPtr = (intptr_t)MR->getResultPointer();
    switch ((PPC::RelocationType)MR->getRelocationType()) {
    default: llvm_unreachable("Unknown relocation type!");
    case PPC::reloc_pcrel_bx:
      // PC-relative relocation for b and bl instructions.
      ResultPtr = (ResultPtr-(intptr_t)RelocPos) >> 2;
      assert(ResultPtr >= -(1 << 23) && ResultPtr < (1 << 23) &&
             "Relocation out of range!");
      *RelocPos |= (ResultPtr & ((1 << 24)-1)) << 2;
      break;
    case PPC::reloc_pcrel_bcx:
      // PC-relative relocation for BLT,BLE,BEQ,BGE,BGT,BNE, or other
      // bcx instructions.
      ResultPtr = (ResultPtr-(intptr_t)RelocPos) >> 2;
      assert(ResultPtr >= -(1 << 13) && ResultPtr < (1 << 13) &&
             "Relocation out of range!");
      *RelocPos |= (ResultPtr & ((1 << 14)-1)) << 2;
      break;
    case PPC::reloc_absolute_high:     // high bits of ref  -> low 16 of instr
    case PPC::reloc_absolute_low: {    // low bits of ref   -> low 16 of instr
      ResultPtr += MR->getConstantVal();

      // If this is a high-part access, get the high-part.
      if (MR->getRelocationType() == PPC::reloc_absolute_high) {
        // If the low part will have a carry (really a borrow) from the low
        // 16-bits into the high 16, add a bit to borrow from.
        if (((int)ResultPtr << 16) < 0)
          ResultPtr += 1 << 16;
        ResultPtr >>= 16;
      }

      // Do the addition then mask, so the addition does not overflow the 16-bit
      // immediate section of the instruction.
      unsigned LowBits  = (*RelocPos + ResultPtr) & 65535;
      unsigned HighBits = *RelocPos & ~65535;
      *RelocPos = LowBits | HighBits;  // Slam into low 16-bits
      break;
    }
    case PPC::reloc_absolute_low_ix: {  // low bits of ref  -> low 14 of instr
      ResultPtr += MR->getConstantVal();
      // Do the addition then mask, so the addition does not overflow the 16-bit
      // immediate section of the instruction.  Bits 0-1 of the DS-form offset
      // must stay clear, hence the 0xFFFC mask.
      unsigned LowBits  = (*RelocPos + ResultPtr) & 0xFFFC;
      unsigned HighBits = *RelocPos & 0xFFFF0003;
      *RelocPos = LowBits | HighBits;  // Slam into low 14-bits.
      break;
    }
    }
  }
}

// Redirect calls to Old so they reach New instead, by overwriting Old's entry
// with an unconditional branch (up to 7 words) and flushing the icache.
void PPCJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
  EmitBranchToAt((intptr_t)Old, (intptr_t)New, false, is64Bit);
  sys::Memory::InvalidateInstructionCache(Old, 7*4);
}