RSForEachExpand.cpp revision 326d02a9f3cfe30caa21e5c2aecbd4c85112b363
1/* 2 * Copyright 2012, The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "bcc/Assert.h" 18#include "bcc/Renderscript/RSTransforms.h" 19 20#include <cstdlib> 21 22#include <llvm/IR/DerivedTypes.h> 23#include <llvm/IR/Function.h> 24#include <llvm/IR/Instructions.h> 25#include <llvm/IR/IRBuilder.h> 26#include <llvm/IR/MDBuilder.h> 27#include <llvm/IR/Module.h> 28#include <llvm/Pass.h> 29#include <llvm/Support/raw_ostream.h> 30#include <llvm/IR/DataLayout.h> 31#include <llvm/IR/Function.h> 32#include <llvm/IR/Type.h> 33#include <llvm/Transforms/Utils/BasicBlockUtils.h> 34 35#include "bcc/Config/Config.h" 36#include "bcc/Support/Log.h" 37 38#include "bcinfo/MetadataExtractor.h" 39 40#define NUM_EXPANDED_FUNCTION_PARAMS 5 41 42using namespace bcc; 43 44namespace { 45 46static const bool gEnableRsTbaa = true; 47 48/* RSForEachExpandPass - This pass operates on functions that are able to be 49 * called via rsForEach() or "foreach_<NAME>". We create an inner loop for the 50 * ForEach-able function to be invoked over the appropriate data cells of the 51 * input/output allocations (adjusting other relevant parameters as we go). We 52 * support doing this for any ForEach-able compute kernels. The new function 53 * name is the original function name followed by ".expand". Note that we 54 * still generate code for the original function. 
55 */ 56class RSForEachExpandPass : public llvm::ModulePass { 57private: 58 static char ID; 59 60 llvm::Module *Module; 61 llvm::LLVMContext *Context; 62 63 /* 64 * Pointer to LLVM type information for the ForEachStubType and the function 65 * signature for expanded kernels. These must be re-calculated for each 66 * module the pass is run on. 67 */ 68 llvm::StructType *ForEachStubType; 69 llvm::FunctionType *ExpandedFunctionType; 70 71 uint32_t mExportForEachCount; 72 const char **mExportForEachNameList; 73 const uint32_t *mExportForEachSignatureList; 74 75 // Turns on optimization of allocation stride values. 76 bool mEnableStepOpt; 77 78 uint32_t getRootSignature(llvm::Function *Function) { 79 const llvm::NamedMDNode *ExportForEachMetadata = 80 Module->getNamedMetadata("#rs_export_foreach"); 81 82 if (!ExportForEachMetadata) { 83 llvm::SmallVector<llvm::Type*, 8> RootArgTys; 84 for (llvm::Function::arg_iterator B = Function->arg_begin(), 85 E = Function->arg_end(); 86 B != E; 87 ++B) { 88 RootArgTys.push_back(B->getType()); 89 } 90 91 // For pre-ICS bitcode, we may not have signature information. In that 92 // case, we use the size of the RootArgTys to select the number of 93 // arguments. 94 return (1 << RootArgTys.size()) - 1; 95 } 96 97 if (ExportForEachMetadata->getNumOperands() == 0) { 98 return 0; 99 } 100 101 bccAssert(ExportForEachMetadata->getNumOperands() > 0); 102 103 // We only handle the case for legacy root() functions here, so this is 104 // hard-coded to look at only the first such function. 
105 llvm::MDNode *SigNode = ExportForEachMetadata->getOperand(0); 106 if (SigNode != NULL && SigNode->getNumOperands() == 1) { 107 llvm::Value *SigVal = SigNode->getOperand(0); 108 if (SigVal->getValueID() == llvm::Value::MDStringVal) { 109 llvm::StringRef SigString = 110 static_cast<llvm::MDString*>(SigVal)->getString(); 111 uint32_t Signature = 0; 112 if (SigString.getAsInteger(10, Signature)) { 113 ALOGE("Non-integer signature value '%s'", SigString.str().c_str()); 114 return 0; 115 } 116 return Signature; 117 } 118 } 119 120 return 0; 121 } 122 123 // Get the actual value we should use to step through an allocation. 124 // 125 // Normally the value we use to step through an allocation is given to us by 126 // the driver. However, for certain primitive data types, we can derive an 127 // integer constant for the step value. We use this integer constant whenever 128 // possible to allow further compiler optimizations to take place. 129 // 130 // DL - Target Data size/layout information. 131 // T - Type of allocation (should be a pointer). 132 // OrigStep - Original step increment (root.expand() input from driver). 
133 llvm::Value *getStepValue(llvm::DataLayout *DL, llvm::Type *AllocType, 134 llvm::Value *OrigStep) { 135 bccAssert(DL); 136 bccAssert(AllocType); 137 bccAssert(OrigStep); 138 llvm::PointerType *PT = llvm::dyn_cast<llvm::PointerType>(AllocType); 139 llvm::Type *VoidPtrTy = llvm::Type::getInt8PtrTy(*Context); 140 if (mEnableStepOpt && AllocType != VoidPtrTy && PT) { 141 llvm::Type *ET = PT->getElementType(); 142 uint64_t ETSize = DL->getTypeAllocSize(ET); 143 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(*Context); 144 return llvm::ConstantInt::get(Int32Ty, ETSize); 145 } else { 146 return OrigStep; 147 } 148 } 149 150#define PARAM_FIELD_IN 0 151#define PARAM_FIELD_OUT 1 152#define PARAM_FIELD_Y 2 153#define PARAM_FIELD_Z 3 154#define PARAM_FIELD_LID 4 155#define PARAM_FIELD_INS 5 156#define PARAM_FIELD_ESTRIDEINS 6 157#define PARAM_FIELD_USR 7 158#define PARAM_FIELD_DIMX 8 159#define PARAM_FIELD_DIMY 9 160#define PARAM_FIELD_DIMZ 10 161#define PARAM_FIELD_SLOT 11 162 163 /// Builds the types required by the pass for the given context. 164 void buildTypes(void) { 165 // Create the RsForEachStubParam struct. 
166 167 llvm::Type *VoidPtrTy = llvm::Type::getInt8PtrTy(*Context); 168 llvm::Type *VoidPtrPtrTy = VoidPtrTy->getPointerTo(); 169 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(*Context); 170 llvm::Type *Int32PtrTy = Int32Ty->getPointerTo(); 171 172 /* Defined in frameworks/base/libs/rs/cpu_ref/rsCpuCore.h: 173 * 174 * struct RsForEachKernelStruct{ 175 * const void *in; 176 * void *out; 177 * uint32_t y; 178 * uint32_t z; 179 * uint32_t lid; 180 * const void **ins; 181 * uint32_t *eStrideIns; 182 * const void *usr; 183 * uint32_t dimX; 184 * uint32_t dimY; 185 * uint32_t dimZ; 186 * uint32_t slot; 187 * }; 188 */ 189 llvm::SmallVector<llvm::Type*, 12> StructTypes; 190 StructTypes.push_back(VoidPtrTy); // const void *in 191 StructTypes.push_back(VoidPtrTy); // void *out 192 StructTypes.push_back(Int32Ty); // uint32_t y 193 StructTypes.push_back(Int32Ty); // uint32_t z 194 StructTypes.push_back(Int32Ty); // uint32_t lid 195 StructTypes.push_back(VoidPtrPtrTy); // const void **ins 196 StructTypes.push_back(Int32PtrTy); // uint32_t *eStrideIns 197 StructTypes.push_back(VoidPtrTy); // const void *usr 198 StructTypes.push_back(Int32Ty); // uint32_t dimX 199 StructTypes.push_back(Int32Ty); // uint32_t dimY 200 StructTypes.push_back(Int32Ty); // uint32_t dimZ 201 StructTypes.push_back(Int32Ty); // uint32_t slot 202 203 ForEachStubType = 204 llvm::StructType::create(StructTypes, "RsForEachStubParamStruct"); 205 206 // Create the function type for expanded kernels. 
207 208 llvm::Type *ForEachStubPtrTy = ForEachStubType->getPointerTo(); 209 210 llvm::SmallVector<llvm::Type*, 8> ParamTypes; 211 ParamTypes.push_back(ForEachStubPtrTy); // const RsForEachStubParamStruct *p 212 ParamTypes.push_back(Int32Ty); // uint32_t x1 213 ParamTypes.push_back(Int32Ty); // uint32_t x2 214 ParamTypes.push_back(Int32Ty); // uint32_t instep 215 ParamTypes.push_back(Int32Ty); // uint32_t outstep 216 217 ExpandedFunctionType = llvm::FunctionType::get(llvm::Type::getVoidTy(*Context), 218 ParamTypes, 219 false); 220 } 221 222 /// @brief Create skeleton of the expanded function. 223 /// 224 /// This creates a function with the following signature: 225 /// 226 /// void (const RsForEachStubParamStruct *p, uint32_t x1, uint32_t x2, 227 /// uint32_t instep, uint32_t outstep) 228 /// 229 llvm::Function *createEmptyExpandedFunction(llvm::StringRef OldName) { 230 llvm::Function *ExpandedFunction = 231 llvm::Function::Create(ExpandedFunctionType, 232 llvm::GlobalValue::ExternalLinkage, 233 OldName + ".expand", Module); 234 235 bccAssert(ExpandedFunction->arg_size() == NUM_EXPANDED_FUNCTION_PARAMS); 236 237 llvm::Function::arg_iterator AI = ExpandedFunction->arg_begin(); 238 239 (AI++)->setName("p"); 240 (AI++)->setName("x1"); 241 (AI++)->setName("x2"); 242 (AI++)->setName("arg_instep"); 243 (AI++)->setName("arg_outstep"); 244 245 llvm::BasicBlock *Begin = llvm::BasicBlock::Create(*Context, "Begin", 246 ExpandedFunction); 247 llvm::IRBuilder<> Builder(Begin); 248 Builder.CreateRetVoid(); 249 250 return ExpandedFunction; 251 } 252 253 /// @brief Create an empty loop 254 /// 255 /// Create a loop of the form: 256 /// 257 /// for (i = LowerBound; i < UpperBound; i++) 258 /// ; 259 /// 260 /// After the loop has been created, the builder is set such that 261 /// instructions can be added to the loop body. 262 /// 263 /// @param Builder The builder to use to build this loop. 
The current 264 /// position of the builder is the position the loop 265 /// will be inserted. 266 /// @param LowerBound The first value of the loop iterator 267 /// @param UpperBound The maximal value of the loop iterator 268 /// @param LoopIV A reference that will be set to the loop iterator. 269 /// @return The BasicBlock that will be executed after the loop. 270 llvm::BasicBlock *createLoop(llvm::IRBuilder<> &Builder, 271 llvm::Value *LowerBound, 272 llvm::Value *UpperBound, 273 llvm::PHINode **LoopIV) { 274 assert(LowerBound->getType() == UpperBound->getType()); 275 276 llvm::BasicBlock *CondBB, *AfterBB, *HeaderBB; 277 llvm::Value *Cond, *IVNext; 278 llvm::PHINode *IV; 279 280 CondBB = Builder.GetInsertBlock(); 281 AfterBB = llvm::SplitBlock(CondBB, Builder.GetInsertPoint(), this); 282 HeaderBB = llvm::BasicBlock::Create(*Context, "Loop", CondBB->getParent()); 283 284 // if (LowerBound < Upperbound) 285 // goto LoopHeader 286 // else 287 // goto AfterBB 288 CondBB->getTerminator()->eraseFromParent(); 289 Builder.SetInsertPoint(CondBB); 290 Cond = Builder.CreateICmpULT(LowerBound, UpperBound); 291 Builder.CreateCondBr(Cond, HeaderBB, AfterBB); 292 293 // iv = PHI [CondBB -> LowerBound], [LoopHeader -> NextIV ] 294 // iv.next = iv + 1 295 // if (iv.next < Upperbound) 296 // goto LoopHeader 297 // else 298 // goto AfterBB 299 Builder.SetInsertPoint(HeaderBB); 300 IV = Builder.CreatePHI(LowerBound->getType(), 2, "X"); 301 IV->addIncoming(LowerBound, CondBB); 302 IVNext = Builder.CreateNUWAdd(IV, Builder.getInt32(1)); 303 IV->addIncoming(IVNext, HeaderBB); 304 Cond = Builder.CreateICmpULT(IVNext, UpperBound); 305 Builder.CreateCondBr(Cond, HeaderBB, AfterBB); 306 AfterBB->setName("Exit"); 307 Builder.SetInsertPoint(HeaderBB->getFirstNonPHI()); 308 *LoopIV = IV; 309 return AfterBB; 310 } 311 312public: 313 RSForEachExpandPass(bool pEnableStepOpt) 314 : ModulePass(ID), Module(NULL), Context(NULL), 315 mEnableStepOpt(pEnableStepOpt) { 316 317 } 318 319 /* Performs 
the actual optimization on a selected function. On success, the
   * Module will contain a new function of the name "<NAME>.expand" that
   * invokes <NAME>() in a loop with the appropriate parameters.
   */
  bool ExpandFunction(llvm::Function *Function, uint32_t Signature) {
    ALOGV("Expanding ForEach-able Function %s",
          Function->getName().str().c_str());

    // A zero signature means a legacy root() function: recover the mask from
    // module metadata (or the argument count for pre-ICS bitcode).
    if (!Signature) {
      Signature = getRootSignature(Function);
      if (!Signature) {
        // We couldn't determine how to expand this function based on its
        // function signature.
        return false;
      }
    }

    llvm::DataLayout DL(Module);

    llvm::Function *ExpandedFunction =
      createEmptyExpandedFunction(Function->getName());

    bccAssert(ExpandedFunction->arg_size() == NUM_EXPANDED_FUNCTION_PARAMS);

    /*
     * Extract the expanded function's parameters.  It is guaranteed by
     * createEmptyExpandedFunction that there will be five parameters.
     */
    llvm::Function::arg_iterator ExpandedFunctionArgIter =
      ExpandedFunction->arg_begin();

    llvm::Value *Arg_p       = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x1      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x2      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_instep  = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_outstep = &*ExpandedFunctionArgIter;

    llvm::Value *InStep  = NULL;
    llvm::Value *OutStep = NULL;

    // Construct the actual function body.
    llvm::IRBuilder<> Builder(ExpandedFunction->getEntryBlock().begin());

    // Collect and construct the arguments for the kernel().
    // Note that we load any loop-invariant arguments before entering the Loop.
    llvm::Function::arg_iterator FunctionArgIter = Function->arg_begin();

    // Optional input allocation: remember its element pointer type, the step
    // (possibly constant-folded), and the base pointer loaded from p->in.
    llvm::Type  *InTy      = NULL;
    llvm::Value *InBasePtr = NULL;
    if (bcinfo::MetadataExtractor::hasForEachSignatureIn(Signature)) {
      InTy = (FunctionArgIter++)->getType();
      InStep = getStepValue(&DL, InTy, Arg_instep);
      InStep->setName("instep");
      InBasePtr = Builder.CreateLoad(
                    Builder.CreateStructGEP(Arg_p, PARAM_FIELD_IN));
    }

    // Optional output allocation, handled symmetrically via p->out.
    llvm::Type  *OutTy      = NULL;
    llvm::Value *OutBasePtr = NULL;
    if (bcinfo::MetadataExtractor::hasForEachSignatureOut(Signature)) {
      OutTy = (FunctionArgIter++)->getType();
      OutStep = getStepValue(&DL, OutTy, Arg_outstep);
      OutStep->setName("outstep");
      OutBasePtr = Builder.CreateLoad(
                     Builder.CreateStructGEP(Arg_p, PARAM_FIELD_OUT));
    }

    // Optional user data pointer: load p->usr and cast it to the parameter
    // type the kernel expects.
    llvm::Value *UsrData = NULL;
    if (bcinfo::MetadataExtractor::hasForEachSignatureUsrData(Signature)) {
      llvm::Type *UsrDataTy = (FunctionArgIter++)->getType();
      UsrData = Builder.CreatePointerCast(Builder.CreateLoad(
          Builder.CreateStructGEP(Arg_p, PARAM_FIELD_USR)), UsrDataTy);
      UsrData->setName("UsrData");
    }

    // The x coordinate is supplied by the loop induction variable below, so
    // just skip past the corresponding kernel parameter here.
    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      FunctionArgIter++;
    }

    // The y coordinate is loop-invariant: load it once from p->y.
    llvm::Value *Y = NULL;
    if (bcinfo::MetadataExtractor::hasForEachSignatureY(Signature)) {
      Y = Builder.CreateLoad(
            Builder.CreateStructGEP(Arg_p, PARAM_FIELD_Y), "Y");

      FunctionArgIter++;
    }

    // Every kernel parameter must be accounted for by the signature bits.
    bccAssert(FunctionArgIter == Function->arg_end());

    llvm::PHINode *IV;
    createLoop(Builder, Arg_x1, Arg_x2, &IV);

    // Populate the actual call to kernel().
    llvm::SmallVector<llvm::Value*, 8> RootArgs;

    llvm::Value *InPtr  = NULL;
    llvm::Value *OutPtr = NULL;

    // Calculate the current input and output pointers
    //
    // We always calculate the input/output pointers with a GEP operating on i8
    // values and only cast at the very end to OutTy.  This is because the step
    // between two values is given in bytes.
    //
    // TODO: We could further optimize the output by using a GEP operation of
    // type 'OutTy' in cases where the element type of the allocation allows.
    if (OutBasePtr) {
      llvm::Value *OutOffset = Builder.CreateSub(IV, Arg_x1);
      OutOffset = Builder.CreateMul(OutOffset, OutStep);
      OutPtr = Builder.CreateGEP(OutBasePtr, OutOffset);
      OutPtr = Builder.CreatePointerCast(OutPtr, OutTy);
    }

    if (InBasePtr) {
      llvm::Value *InOffset = Builder.CreateSub(IV, Arg_x1);
      InOffset = Builder.CreateMul(InOffset, InStep);
      InPtr = Builder.CreateGEP(InBasePtr, InOffset);
      InPtr = Builder.CreatePointerCast(InPtr, InTy);
    }

    // Argument order matches the original kernel's parameter order:
    // in, out, usrData, x, y (each only if its signature bit is set).
    if (InPtr) {
      RootArgs.push_back(InPtr);
    }

    if (OutPtr) {
      RootArgs.push_back(OutPtr);
    }

    if (UsrData) {
      RootArgs.push_back(UsrData);
    }

    llvm::Value *X = IV;
    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      RootArgs.push_back(X);
    }

    if (Y) {
      RootArgs.push_back(Y);
    }

    Builder.CreateCall(Function, RootArgs);

    return true;
  }

  /* Expand a pass-by-value kernel.  Like ExpandFunction(), but the kernel
   * receives input cells by value and may return its output value, which we
   * then store into the output allocation.
   */
  bool ExpandKernel(llvm::Function *Function, uint32_t Signature) {
    bccAssert(bcinfo::MetadataExtractor::hasForEachSignatureKernel(Signature));
    ALOGV("Expanding kernel Function %s", Function->getName().str().c_str());

    // TODO: Refactor this to share functionality with ExpandFunction.
    llvm::DataLayout DL(Module);

    llvm::Function *ExpandedFunction =
      createEmptyExpandedFunction(Function->getName());

    /*
     * Extract the expanded function's parameters.  It is guaranteed by
     * createEmptyExpandedFunction that there will be five parameters.
     */

    bccAssert(ExpandedFunction->arg_size() == NUM_EXPANDED_FUNCTION_PARAMS);

    llvm::Function::arg_iterator ExpandedFunctionArgIter =
      ExpandedFunction->arg_begin();

    llvm::Value *Arg_p       = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x1      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x2      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_instep  = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_outstep = &*ExpandedFunctionArgIter;

    // Construct the actual function body.
    llvm::IRBuilder<> Builder(ExpandedFunction->getEntryBlock().begin());

    // Create TBAA meta-data: scalar type nodes for "allocation" and "pointer"
    // under the RenderScript root, each wrapped in an access tag.
    llvm::MDNode *TBAARenderScript, *TBAAAllocation, *TBAAPointer;
    llvm::MDBuilder MDHelper(*Context);

    TBAARenderScript = MDHelper.createTBAARoot("RenderScript TBAA");
    TBAAAllocation = MDHelper.createTBAAScalarTypeNode("allocation",
                                                       TBAARenderScript);
    TBAAAllocation = MDHelper.createTBAAStructTagNode(TBAAAllocation,
                                                      TBAAAllocation, 0);
    TBAAPointer = MDHelper.createTBAAScalarTypeNode("pointer",
                                                    TBAARenderScript);
    TBAAPointer = MDHelper.createTBAAStructTagNode(TBAAPointer, TBAAPointer, 0);

    /*
     * Collect and construct the arguments for the kernel().
     *
     * Note that we load any loop-invariant arguments before entering the Loop.
     *
     * NumInputs starts as the full parameter count and is decremented for
     * every special (non-input) parameter we identify below.
     */
    size_t NumInputs = Function->arg_size();

    llvm::Value *Y = NULL;
    if (bcinfo::MetadataExtractor::hasForEachSignatureY(Signature)) {
      Y = Builder.CreateLoad(
            Builder.CreateStructGEP(Arg_p, PARAM_FIELD_Y), "Y");

      --NumInputs;
    }

    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      --NumInputs;
    }

    // No usrData parameter on kernels.
    bccAssert(
        !bcinfo::MetadataExtractor::hasForEachSignatureUsrData(Signature));

    llvm::Function::arg_iterator ArgIter = Function->arg_begin();

    // Check the return type
    llvm::Type     *OutTy      = NULL;
    llvm::Value    *OutStep    = NULL;
    llvm::LoadInst *OutBasePtr = NULL;

    bool PassOutByReference = false;

    if (bcinfo::MetadataExtractor::hasForEachSignatureOut(Signature)) {
      llvm::Type *OutBaseTy = Function->getReturnType();

      if (OutBaseTy->isVoidTy()) {
        // A void return with the OUT bit set means the output is written
        // through the kernel's first parameter instead of being returned.
        PassOutByReference = true;
        OutTy = ArgIter->getType();

        ArgIter++;
        --NumInputs;
      } else {
        // We don't increment Args, since we are using the actual return type.
        OutTy = OutBaseTy->getPointerTo();
      }

      OutStep = getStepValue(&DL, OutTy, Arg_outstep);
      OutStep->setName("outstep");
      OutBasePtr = Builder.CreateLoad(
                     Builder.CreateStructGEP(Arg_p, PARAM_FIELD_OUT));

      if (gEnableRsTbaa) {
        OutBasePtr->setMetadata("tbaa", TBAAPointer);
      }
    }

    // Per-input bookkeeping: element pointer type, step value, base pointer
    // and whether the parameter is a promoted struct (see below).
    llvm::SmallVector<llvm::Type*, 8>     InTypes;
    llvm::SmallVector<llvm::Value*, 8>    InSteps;
    llvm::SmallVector<llvm::LoadInst*, 8> InBasePtrs;
    llvm::SmallVector<bool, 8>            InIsStructPointer;

    if (NumInputs == 1) {
      // Single input: read directly from p->in / the instep argument.
      llvm::Type *InType = ArgIter->getType();

      /*
       * AArch64 calling conventions dictate that structs of sufficient size
       * get passed by pointer instead of passed by value.  This, combined
       * with the fact that we don't allow kernels to operate on pointer data
       * means that if we see a kernel with a pointer parameter we know that
       * it is struct input that has been promoted.  As such we don't need to
       * convert its type to a pointer.  Later we will need to know to avoid
       * a load, so we save this information in InIsStructPointer.
       */
      if (!InType->isPointerTy()) {
        InType = InType->getPointerTo();
        InIsStructPointer.push_back(false);
      } else {
        InIsStructPointer.push_back(true);
      }

      llvm::Value *InStep = getStepValue(&DL, InType, Arg_instep);

      InStep->setName("instep");

      llvm::Value *Input = Builder.CreateStructGEP(Arg_p, PARAM_FIELD_IN);
      llvm::LoadInst *InBasePtr = Builder.CreateLoad(Input, "input_base");

      if (gEnableRsTbaa) {
        InBasePtr->setMetadata("tbaa", TBAAPointer);
      }

      InTypes.push_back(InType);
      InSteps.push_back(InStep);
      InBasePtrs.push_back(InBasePtr);

    } else if (NumInputs > 1) {
      // Multiple inputs: base pointers and strides live in the p->ins and
      // p->eStrideIns arrays, indexed per input.
      llvm::Value *InsMember = Builder.CreateStructGEP(Arg_p,
                                                       PARAM_FIELD_INS);
      llvm::LoadInst *InsBasePtr = Builder.CreateLoad(InsMember,
                                                      "inputs_base");

      llvm::Value *InStepsMember =
        Builder.CreateStructGEP(Arg_p, PARAM_FIELD_ESTRIDEINS);
      llvm::LoadInst *InStepsBase = Builder.CreateLoad(InStepsMember,
                                                       "insteps_base");

      for (size_t InputIndex = 0; InputIndex < NumInputs;
           ++InputIndex, ArgIter++) {

        llvm::Value *IndexVal = Builder.getInt32(InputIndex);

        llvm::Value *InStepAddr = Builder.CreateGEP(InStepsBase, IndexVal);
        llvm::LoadInst *InStepArg = Builder.CreateLoad(InStepAddr,
                                                       "instep_addr");

        llvm::Type *InType = ArgIter->getType();

        /*
         * AArch64 calling conventions dictate that structs of sufficient
         * size get passed by pointer instead of passed by value.  This,
         * combined with the fact that we don't allow kernels to operate on
         * pointer data means that if we see a kernel with a pointer
         * parameter we know that it is struct input that has been promoted.
         * As such we don't need to convert its type to a pointer.  Later we
         * will need to know to avoid a load, so we save this information in
         * InIsStructPointer.
         */
        if (!InType->isPointerTy()) {
          InType = InType->getPointerTo();
          InIsStructPointer.push_back(false);
        } else {
          InIsStructPointer.push_back(true);
        }

        llvm::Value *InStep = getStepValue(&DL, InType, InStepArg);

        InStep->setName("instep");

        llvm::Value *InputAddr = Builder.CreateGEP(InsBasePtr, IndexVal);
        llvm::LoadInst *InBasePtr = Builder.CreateLoad(InputAddr,
                                                       "input_base");

        if (gEnableRsTbaa) {
          InBasePtr->setMetadata("tbaa", TBAAPointer);
        }

        InTypes.push_back(InType);
        InSteps.push_back(InStep);
        InBasePtrs.push_back(InBasePtr);
      }
    }

    llvm::PHINode *IV;
    createLoop(Builder, Arg_x1, Arg_x2, &IV);

    // Populate the actual call to kernel().
    llvm::SmallVector<llvm::Value*, 8> RootArgs;

    // Calculate the current input and output pointers
    //
    // We always calculate the input/output pointers with a GEP operating on
    // i8 values combined with a multiplication and only cast at the very end
    // to OutTy.  This is to account for dynamic stepping sizes when the value
    // isn't apparent at compile time.  In the (very common) case when we know
    // the step size at compile time, due to having complete type information
    // this multiplication will be optimized out and produces code equivalent
    // to a GEP on a pointer of the correct type.

    // Output

    llvm::Value *OutPtr = NULL;
    if (OutBasePtr) {
      llvm::Value *OutOffset = Builder.CreateSub(IV, Arg_x1);

      OutOffset = Builder.CreateMul(OutOffset, OutStep);
      OutPtr = Builder.CreateGEP(OutBasePtr, OutOffset);
      OutPtr = Builder.CreatePointerCast(OutPtr, OutTy);

      if (PassOutByReference) {
        RootArgs.push_back(OutPtr);
      }
    }

    // Inputs

    if (NumInputs > 0) {
      llvm::Value *Offset = Builder.CreateSub(IV, Arg_x1);

      for (size_t Index = 0; Index < NumInputs; ++Index) {
        llvm::Value *InOffset = Builder.CreateMul(Offset, InSteps[Index]);
        llvm::Value *InPtr = Builder.CreateGEP(InBasePtrs[Index], InOffset);

        InPtr = Builder.CreatePointerCast(InPtr, InTypes[Index]);

        llvm::Value *Input;

        if (InIsStructPointer[Index]) {
          // Promoted struct parameter: pass the pointer itself, no load.
          Input = InPtr;

        } else {
          llvm::LoadInst *InputLoad = Builder.CreateLoad(InPtr, "input");

          if (gEnableRsTbaa) {
            InputLoad->setMetadata("tbaa", TBAAAllocation);
          }

          Input = InputLoad;
        }

        RootArgs.push_back(Input);
      }
    }

    llvm::Value *X = IV;
    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      RootArgs.push_back(X);
    }

    if (Y) {
      RootArgs.push_back(Y);
    }

    llvm::Value *RetVal = Builder.CreateCall(Function, RootArgs);

    // For a by-value output, store the returned cell into the output
    // allocation (by-reference kernels already wrote through OutPtr).
    if (OutPtr && !PassOutByReference) {
      llvm::StoreInst *Store = Builder.CreateStore(RetVal, OutPtr);
      if (gEnableRsTbaa) {
        Store->setMetadata("tbaa", TBAAAllocation);
      }
    }

    return true;
  }

  /// @brief Checks if pointers to allocation internals are exposed
  ///
  /// This function verifies if through the parameters passed to the kernel
  /// or through calls to the runtime library the script gains access to
  /// pointers pointing to data within a RenderScript Allocation.
743 /// If we know we control all loads from and stores to data within 744 /// RenderScript allocations and if we know the run-time internal accesses 745 /// are all annotated with RenderScript TBAA metadata, only then we 746 /// can safely use TBAA to distinguish between generic and from-allocation 747 /// pointers. 748 bool allocPointersExposed(llvm::Module &Module) { 749 // Old style kernel function can expose pointers to elements within 750 // allocations. 751 // TODO: Extend analysis to allow simple cases of old-style kernels. 752 for (size_t i = 0; i < mExportForEachCount; ++i) { 753 const char *Name = mExportForEachNameList[i]; 754 uint32_t Signature = mExportForEachSignatureList[i]; 755 if (Module.getFunction(Name) && 756 !bcinfo::MetadataExtractor::hasForEachSignatureKernel(Signature)) { 757 return true; 758 } 759 } 760 761 // Check for library functions that expose a pointer to an Allocation or 762 // that are not yet annotated with RenderScript-specific tbaa information. 763 static std::vector<std::string> Funcs; 764 765 // rsGetElementAt(...) 
766 Funcs.push_back("_Z14rsGetElementAt13rs_allocationj"); 767 Funcs.push_back("_Z14rsGetElementAt13rs_allocationjj"); 768 Funcs.push_back("_Z14rsGetElementAt13rs_allocationjjj"); 769 // rsSetElementAt() 770 Funcs.push_back("_Z14rsSetElementAt13rs_allocationPvj"); 771 Funcs.push_back("_Z14rsSetElementAt13rs_allocationPvjj"); 772 Funcs.push_back("_Z14rsSetElementAt13rs_allocationPvjjj"); 773 // rsGetElementAtYuv_uchar_Y() 774 Funcs.push_back("_Z25rsGetElementAtYuv_uchar_Y13rs_allocationjj"); 775 // rsGetElementAtYuv_uchar_U() 776 Funcs.push_back("_Z25rsGetElementAtYuv_uchar_U13rs_allocationjj"); 777 // rsGetElementAtYuv_uchar_V() 778 Funcs.push_back("_Z25rsGetElementAtYuv_uchar_V13rs_allocationjj"); 779 780 for (std::vector<std::string>::iterator FI = Funcs.begin(), 781 FE = Funcs.end(); 782 FI != FE; ++FI) { 783 llvm::Function *Function = Module.getFunction(*FI); 784 785 if (!Function) { 786 ALOGE("Missing run-time function '%s'", FI->c_str()); 787 return true; 788 } 789 790 if (Function->getNumUses() > 0) { 791 return true; 792 } 793 } 794 795 return false; 796 } 797 798 /// @brief Connect RenderScript TBAA metadata to C/C++ metadata 799 /// 800 /// The TBAA metadata used to annotate loads/stores from RenderScript 801 /// Allocations is generated in a separate TBAA tree with a "RenderScript TBAA" 802 /// root node. LLVM does assume may-alias for all nodes in unrelated alias 803 /// analysis trees. This function makes the RenderScript TBAA a subtree of the 804 /// normal C/C++ TBAA tree aside of normal C/C++ types. With the connected trees 805 /// every access to an Allocation is resolved to must-alias if compared to 806 /// a normal C/C++ access. 
807 void connectRenderScriptTBAAMetadata(llvm::Module &Module) { 808 llvm::MDBuilder MDHelper(*Context); 809 llvm::MDNode *TBAARenderScript = 810 MDHelper.createTBAARoot("RenderScript TBAA"); 811 812 llvm::MDNode *TBAARoot = MDHelper.createTBAARoot("Simple C/C++ TBAA"); 813 llvm::MDNode *TBAAMergedRS = MDHelper.createTBAANode("RenderScript", 814 TBAARoot); 815 816 TBAARenderScript->replaceAllUsesWith(TBAAMergedRS); 817 } 818 819 virtual bool runOnModule(llvm::Module &Module) { 820 bool Changed = false; 821 this->Module = &Module; 822 this->Context = &Module.getContext(); 823 824 this->buildTypes(); 825 826 bcinfo::MetadataExtractor me(&Module); 827 if (!me.extract()) { 828 ALOGE("Could not extract metadata from module!"); 829 return false; 830 } 831 mExportForEachCount = me.getExportForEachSignatureCount(); 832 mExportForEachNameList = me.getExportForEachNameList(); 833 mExportForEachSignatureList = me.getExportForEachSignatureList(); 834 835 bool AllocsExposed = allocPointersExposed(Module); 836 837 for (size_t i = 0; i < mExportForEachCount; ++i) { 838 const char *name = mExportForEachNameList[i]; 839 uint32_t signature = mExportForEachSignatureList[i]; 840 llvm::Function *kernel = Module.getFunction(name); 841 if (kernel) { 842 if (bcinfo::MetadataExtractor::hasForEachSignatureKernel(signature)) { 843 Changed |= ExpandKernel(kernel, signature); 844 kernel->setLinkage(llvm::GlobalValue::InternalLinkage); 845 } else if (kernel->getReturnType()->isVoidTy()) { 846 Changed |= ExpandFunction(kernel, signature); 847 kernel->setLinkage(llvm::GlobalValue::InternalLinkage); 848 } else { 849 // There are some graphics root functions that are not 850 // expanded, but that will be called directly. For those 851 // functions, we can not set the linkage to internal. 
852 } 853 } 854 } 855 856 if (gEnableRsTbaa && !AllocsExposed) { 857 connectRenderScriptTBAAMetadata(Module); 858 } 859 860 return Changed; 861 } 862 863 virtual const char *getPassName() const { 864 return "ForEach-able Function Expansion"; 865 } 866 867}; // end RSForEachExpandPass 868 869} // end anonymous namespace 870 871char RSForEachExpandPass::ID = 0; 872 873namespace bcc { 874 875llvm::ModulePass * 876createRSForEachExpandPass(bool pEnableStepOpt){ 877 return new RSForEachExpandPass(pEnableStepOpt); 878} 879 880} // end namespace bcc 881