RSForEachExpand.cpp revision bb73b74a9f6ad26c2ab30557bfe6916a44ed75f6
/*
 * Copyright 2012, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bcc/Assert.h"
#include "bcc/Renderscript/RSTransforms.h"

#include <cstdlib>

#include <llvm/IR/DerivedTypes.h>
#include <llvm/IR/Function.h>
#include <llvm/IR/Instructions.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/MDBuilder.h>
#include <llvm/IR/Module.h>
#include <llvm/Pass.h>
#include <llvm/Support/raw_ostream.h>
#include <llvm/IR/DataLayout.h>
#include <llvm/IR/Function.h>
#include <llvm/IR/Type.h>
#include <llvm/Transforms/Utils/BasicBlockUtils.h>

#include "bcc/Config/Config.h"
#include "bcc/Support/Log.h"

#include "bcinfo/MetadataExtractor.h"

#define NUM_EXPANDED_FUNCTION_PARAMS 4

using namespace bcc;

namespace {

static const bool gEnableRsTbaa = true;

/* RSForEachExpandPass - This pass operates on functions that can be called
 * via rsForEach() or "foreach_<NAME>". We create an inner loop for the
 * ForEach-able function to be invoked over the appropriate data cells of the
 * input/output allocations (adjusting other relevant parameters as we go). We
 * support doing this for any ForEach-able compute kernel. The new function
 * name is the original function name followed by ".expand". Note that we
 * still generate code for the original function.
 */
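
/* Illustration (added for clarity; "foo" is a hypothetical kernel, and the
 * sketch below is conceptual C, not the exact IR this pass emits): given a
 * ForEach-able kernel such as
 *
 *   int foo(int in, uint32_t x);
 *
 * the pass produces an expanded wrapper that behaves roughly like
 *
 *   void foo.expand(const RsForEachStubParamStruct *p, uint32_t x1,
 *                   uint32_t x2, uint32_t outstep) {
 *     for (uint32_t x = x1; x < x2; ++x) {
 *       out_base[x] = foo(in_base[x], x);  // bases derived from *p
 *     }
 *   }
 *
 * which the RenderScript driver then invokes over slices [x1, x2) of the
 * allocation.
 */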
class RSForEachExpandPass : public llvm::ModulePass {
private:
  static char ID;

  llvm::Module *Module;
  llvm::LLVMContext *Context;

  /*
   * Pointer to LLVM type information for the ForEachStubType and the function
   * signature for expanded kernels.  These must be re-calculated for each
   * module the pass is run on.
   */
  llvm::StructType   *ForEachStubType;
  llvm::FunctionType *ExpandedFunctionType;

  uint32_t mExportForEachCount;
  const char **mExportForEachNameList;
  const uint32_t *mExportForEachSignatureList;

  // Turns on optimization of allocation stride values.
  bool mEnableStepOpt;

  uint32_t getRootSignature(llvm::Function *Function) {
    const llvm::NamedMDNode *ExportForEachMetadata =
        Module->getNamedMetadata("#rs_export_foreach");

    if (!ExportForEachMetadata) {
      llvm::SmallVector<llvm::Type*, 8> RootArgTys;
      for (llvm::Function::arg_iterator B = Function->arg_begin(),
                                        E = Function->arg_end();
           B != E;
           ++B) {
        RootArgTys.push_back(B->getType());
      }

      // For pre-ICS bitcode, we may not have signature information. In that
      // case, we construct the signature from the number of arguments
      // (RootArgTys.size()), with one bit set per argument: e.g. four
      // arguments yield (1 << 4) - 1 = 0xF.
      return (1 << RootArgTys.size()) - 1;
    }

    if (ExportForEachMetadata->getNumOperands() == 0) {
      return 0;
    }

    bccAssert(ExportForEachMetadata->getNumOperands() > 0);

    // We only handle the case for legacy root() functions here, so this is
    // hard-coded to look at only the first such function.
    llvm::MDNode *SigNode = ExportForEachMetadata->getOperand(0);
    if (SigNode != nullptr && SigNode->getNumOperands() == 1) {
      llvm::Value *SigVal = SigNode->getOperand(0);
      if (SigVal->getValueID() == llvm::Value::MDStringVal) {
        llvm::StringRef SigString =
            static_cast<llvm::MDString*>(SigVal)->getString();
        uint32_t Signature = 0;
        if (SigString.getAsInteger(10, Signature)) {
          ALOGE("Non-integer signature value '%s'", SigString.str().c_str());
          return 0;
        }
        return Signature;
      }
    }

    return 0;
  }

  bool isStepOptSupported(llvm::Type *AllocType) {

    llvm::PointerType *PT = llvm::dyn_cast<llvm::PointerType>(AllocType);
    llvm::Type *VoidPtrTy = llvm::Type::getInt8PtrTy(*Context);

    // If step optimization has not been requested, always use the
    // runtime-provided step value.
    if (!mEnableStepOpt) {
      return false;
    }

    if (AllocType == VoidPtrTy) {
      return false;
    }

    if (!PT) {
      return false;
    }

    // remaining conditions are 64-bit only
    if (VoidPtrTy->getPrimitiveSizeInBits() == 32) {
      return true;
    }

    // coerce suggests an upconverted struct type, which we can't support
    if (AllocType->getStructName().find("coerce") != llvm::StringRef::npos) {
      return false;
    }

    // 2xi64 and i128 suggest an upconverted struct type, which are also unsupported
    llvm::Type *V2xi64Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(*Context), 2);
    llvm::Type *Int128Ty = llvm::Type::getIntNTy(*Context, 128);
    if (AllocType == V2xi64Ty || AllocType == Int128Ty) {
      return false;
    }

    return true;
  }

  // Get the actual value we should use to step through an allocation.
  //
  // Normally the value we use to step through an allocation is given to us by
  // the driver. However, for certain primitive data types, we can derive an
  // integer constant for the step value. We use this integer constant whenever
  // possible to allow further compiler optimizations to take place.
  //
  // DL - Target Data size/layout information.
  // AllocType - Type of allocation (should be a pointer).
  // OrigStep - Original step increment (root.expand() input from driver).
  llvm::Value *getStepValue(llvm::DataLayout *DL, llvm::Type *AllocType,
                            llvm::Value *OrigStep) {
    bccAssert(DL);
    bccAssert(AllocType);
    bccAssert(OrigStep);
    llvm::PointerType *PT = llvm::dyn_cast<llvm::PointerType>(AllocType);
    if (isStepOptSupported(AllocType)) {
      llvm::Type *ET = PT->getElementType();
      uint64_t ETSize = DL->getTypeAllocSize(ET);
      llvm::Type *Int32Ty = llvm::Type::getInt32Ty(*Context);
      return llvm::ConstantInt::get(Int32Ty, ETSize);
    } else {
      return OrigStep;
    }
  }
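
  // Illustrative example (added for clarity, not from the original source):
  // if AllocType is <4 x float>* (a float4 allocation), the element's alloc
  // size is 16 bytes, so the step becomes the constant i32 16 instead of the
  // runtime-supplied OrigStep, letting later passes fold the per-iteration
  // pointer arithmetic.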

#define PARAM_FIELD_INS         0
#define PARAM_FIELD_INESTRIDES  1
#define PARAM_FIELD_OUT         2
#define PARAM_FIELD_Y           3
#define PARAM_FIELD_Z           4
#define PARAM_FIELD_LID         5
#define PARAM_FIELD_USR         6
#define PARAM_FIELD_DIMX        7
#define PARAM_FIELD_DIMY        8
#define PARAM_FIELD_DIMZ        9
#define PARAM_FIELD_SLOT       10
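
// Note (added for clarity): these field indices are used as CreateStructGEP
// indices on Arg_p, so they must stay in sync with the order in which the
// corresponding members are pushed into StructTypes by buildTypes() below.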

  /// Builds the types required by the pass for the given context.
  void buildTypes(void) {
    // Create the RsForEachStubParam struct.

    llvm::Type *VoidPtrTy    = llvm::Type::getInt8PtrTy(*Context);
    llvm::Type *VoidPtrPtrTy = VoidPtrTy->getPointerTo();
    llvm::Type *Int32Ty      = llvm::Type::getInt32Ty(*Context);
    llvm::Type *Int32PtrTy   = Int32Ty->getPointerTo();

    /* Defined in frameworks/base/libs/rs/cpu_ref/rsCpuCore.h:
     *
     * struct RsForEachKernelStruct{
     *   const void *in;
     *   void *out;
     *   uint32_t y;
     *   uint32_t z;
     *   uint32_t lid;
     *   const void **ins;
     *   uint32_t *inEStrides;
     *   const void *usr;
     *   uint32_t dimX;
     *   uint32_t dimY;
     *   uint32_t dimZ;
     *   uint32_t slot;
     * };
     */
    llvm::SmallVector<llvm::Type*, 12> StructTypes;
    StructTypes.push_back(VoidPtrPtrTy); // const void **ins
    StructTypes.push_back(Int32PtrTy);   // uint32_t *inEStrides
    StructTypes.push_back(VoidPtrTy);    // void *out
    StructTypes.push_back(Int32Ty);      // uint32_t y
    StructTypes.push_back(Int32Ty);      // uint32_t z
    StructTypes.push_back(Int32Ty);      // uint32_t lid
    StructTypes.push_back(VoidPtrTy);    // const void *usr
    StructTypes.push_back(Int32Ty);      // uint32_t dimX
    StructTypes.push_back(Int32Ty);      // uint32_t dimY
    StructTypes.push_back(Int32Ty);      // uint32_t dimZ
    StructTypes.push_back(Int32Ty);      // uint32_t slot

    ForEachStubType =
      llvm::StructType::create(StructTypes, "RsForEachStubParamStruct");

    // Create the function type for expanded kernels.

    llvm::Type *ForEachStubPtrTy = ForEachStubType->getPointerTo();

    llvm::SmallVector<llvm::Type*, 8> ParamTypes;
    ParamTypes.push_back(ForEachStubPtrTy); // const RsForEachStubParamStruct *p
    ParamTypes.push_back(Int32Ty);          // uint32_t x1
    ParamTypes.push_back(Int32Ty);          // uint32_t x2
    ParamTypes.push_back(Int32Ty);          // uint32_t outstep

    ExpandedFunctionType =
        llvm::FunctionType::get(llvm::Type::getVoidTy(*Context), ParamTypes,
                                false);
  }

  /// @brief Create skeleton of the expanded function.
  ///
  /// This creates a function with the following signature:
  ///
  ///   void (const RsForEachStubParamStruct *p, uint32_t x1, uint32_t x2,
  ///         uint32_t outstep)
  ///
  llvm::Function *createEmptyExpandedFunction(llvm::StringRef OldName) {
    llvm::Function *ExpandedFunction =
      llvm::Function::Create(ExpandedFunctionType,
                             llvm::GlobalValue::ExternalLinkage,
                             OldName + ".expand", Module);

    bccAssert(ExpandedFunction->arg_size() == NUM_EXPANDED_FUNCTION_PARAMS);

    llvm::Function::arg_iterator AI = ExpandedFunction->arg_begin();

    (AI++)->setName("p");
    (AI++)->setName("x1");
    (AI++)->setName("x2");
    (AI++)->setName("arg_outstep");

    llvm::BasicBlock *Begin = llvm::BasicBlock::Create(*Context, "Begin",
                                                       ExpandedFunction);
    llvm::IRBuilder<> Builder(Begin);
    Builder.CreateRetVoid();

    return ExpandedFunction;
  }

  /// @brief Create an empty loop
  ///
  /// Create a loop of the form:
  ///
  /// for (i = LowerBound; i < UpperBound; i++)
  ///   ;
  ///
  /// After the loop has been created, the builder is set such that
  /// instructions can be added to the loop body.
  ///
  /// @param Builder The builder to use to build this loop. The current
  ///                position of the builder is the position the loop
  ///                will be inserted.
  /// @param LowerBound The first value of the loop iterator
  /// @param UpperBound The exclusive upper bound of the loop iterator
  /// @param LoopIV A reference that will be set to the loop iterator.
  /// @return The BasicBlock that will be executed after the loop.
  llvm::BasicBlock *createLoop(llvm::IRBuilder<> &Builder,
                               llvm::Value *LowerBound,
                               llvm::Value *UpperBound,
                               llvm::PHINode **LoopIV) {
    assert(LowerBound->getType() == UpperBound->getType());

    llvm::BasicBlock *CondBB, *AfterBB, *HeaderBB;
    llvm::Value *Cond, *IVNext;
    llvm::PHINode *IV;

    CondBB = Builder.GetInsertBlock();
    AfterBB = llvm::SplitBlock(CondBB, Builder.GetInsertPoint(), this);
    HeaderBB = llvm::BasicBlock::Create(*Context, "Loop", CondBB->getParent());

    // if (LowerBound < UpperBound)
    //   goto LoopHeader
    // else
    //   goto AfterBB
    CondBB->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(CondBB);
    Cond = Builder.CreateICmpULT(LowerBound, UpperBound);
    Builder.CreateCondBr(Cond, HeaderBB, AfterBB);

    // iv = PHI [CondBB -> LowerBound], [LoopHeader -> NextIV ]
    // iv.next = iv + 1
    // if (iv.next < UpperBound)
    //   goto LoopHeader
    // else
    //   goto AfterBB
    Builder.SetInsertPoint(HeaderBB);
    IV = Builder.CreatePHI(LowerBound->getType(), 2, "X");
    IV->addIncoming(LowerBound, CondBB);
    IVNext = Builder.CreateNUWAdd(IV, Builder.getInt32(1));
    IV->addIncoming(IVNext, HeaderBB);
    Cond = Builder.CreateICmpULT(IVNext, UpperBound);
    Builder.CreateCondBr(Cond, HeaderBB, AfterBB);
    AfterBB->setName("Exit");
    Builder.SetInsertPoint(HeaderBB->getFirstNonPHI());
    *LoopIV = IV;
    return AfterBB;
  }
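
  /* Rough sketch (added for illustration; not emitted verbatim) of the control
   * flow createLoop builds when the builder starts in a block "Cond" that
   * originally fell through to the code now in "Exit":
   *
   *   Cond:  if (LowerBound < UpperBound) goto Loop; else goto Exit;
   *   Loop:  X = phi [LowerBound from Cond], [X.next from Loop]
   *          ...the caller's loop body is inserted here (the builder is left
   *             at the first non-PHI instruction of Loop)...
   *          X.next = X + 1
   *          if (X.next < UpperBound) goto Loop; else goto Exit;
   *   Exit:  (original fall-through code)
   */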

public:
  RSForEachExpandPass(bool pEnableStepOpt)
      : ModulePass(ID), Module(nullptr), Context(nullptr),
        mEnableStepOpt(pEnableStepOpt) {

  }

  /* Performs the actual optimization on a selected function. On success, the
   * Module will contain a new function of the name "<NAME>.expand" that
   * invokes <NAME>() in a loop with the appropriate parameters.
   */
  bool ExpandFunction(llvm::Function *Function, uint32_t Signature) {
    ALOGV("Expanding ForEach-able Function %s",
          Function->getName().str().c_str());

    if (!Signature) {
      Signature = getRootSignature(Function);
      if (!Signature) {
        // We couldn't determine how to expand this function based on its
        // function signature.
        return false;
      }
    }

    llvm::DataLayout DL(Module);

    llvm::Function *ExpandedFunction =
      createEmptyExpandedFunction(Function->getName());

    bccAssert(ExpandedFunction->arg_size() == NUM_EXPANDED_FUNCTION_PARAMS);

    /*
     * Extract the expanded function's parameters.  It is guaranteed by
     * createEmptyExpandedFunction that there will be four parameters.
     */
    llvm::Function::arg_iterator ExpandedFunctionArgIter =
      ExpandedFunction->arg_begin();

    llvm::Value *Arg_p       = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x1      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x2      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_outstep = &*(ExpandedFunctionArgIter);

    llvm::Value *InStep  = nullptr;
    llvm::Value *OutStep = nullptr;

    // Construct the actual function body.
    llvm::IRBuilder<> Builder(ExpandedFunction->getEntryBlock().begin());

    // Collect and construct the arguments for the kernel().
    // Note that we load any loop-invariant arguments before entering the Loop.
    llvm::Function::arg_iterator FunctionArgIter = Function->arg_begin();

    llvm::Type  *InTy      = nullptr;
    llvm::Value *InBasePtr = nullptr;
    if (bcinfo::MetadataExtractor::hasForEachSignatureIn(Signature)) {
      llvm::Value    *InsMember  = Builder.CreateStructGEP(Arg_p,
                                                           PARAM_FIELD_INS);
      llvm::LoadInst *InsBasePtr = Builder.CreateLoad(InsMember, "inputs_base");

      llvm::Value *InStepsMember =
        Builder.CreateStructGEP(Arg_p, PARAM_FIELD_INESTRIDES);
      llvm::LoadInst *InStepsBase = Builder.CreateLoad(InStepsMember,
                                                       "insteps_base");

      llvm::Value *IndexVal = Builder.getInt32(0);

      llvm::Value    *InStepAddr = Builder.CreateGEP(InStepsBase, IndexVal);
      llvm::LoadInst *InStepArg  = Builder.CreateLoad(InStepAddr,
                                                      "instep_addr");

      InTy = (FunctionArgIter++)->getType();
      InStep = getStepValue(&DL, InTy, InStepArg);

      InStep->setName("instep");

      llvm::Value *InputAddr = Builder.CreateGEP(InsBasePtr, IndexVal);
      InBasePtr = Builder.CreateLoad(InputAddr, "input_base");
    }

    llvm::Type *OutTy = nullptr;
    llvm::Value *OutBasePtr = nullptr;
    if (bcinfo::MetadataExtractor::hasForEachSignatureOut(Signature)) {
      OutTy = (FunctionArgIter++)->getType();
      OutStep = getStepValue(&DL, OutTy, Arg_outstep);
      OutStep->setName("outstep");
      OutBasePtr = Builder.CreateLoad(
                     Builder.CreateStructGEP(Arg_p, PARAM_FIELD_OUT));
    }

    llvm::Value *UsrData = nullptr;
    if (bcinfo::MetadataExtractor::hasForEachSignatureUsrData(Signature)) {
      llvm::Type *UsrDataTy = (FunctionArgIter++)->getType();
      UsrData = Builder.CreatePointerCast(Builder.CreateLoad(
          Builder.CreateStructGEP(Arg_p, PARAM_FIELD_USR)), UsrDataTy);
      UsrData->setName("UsrData");
    }

    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      FunctionArgIter++;
    }

    llvm::Value *Y = nullptr;
    if (bcinfo::MetadataExtractor::hasForEachSignatureY(Signature)) {
      Y = Builder.CreateLoad(
            Builder.CreateStructGEP(Arg_p, PARAM_FIELD_Y), "Y");

      FunctionArgIter++;
    }

    bccAssert(FunctionArgIter == Function->arg_end());

    llvm::PHINode *IV;
    createLoop(Builder, Arg_x1, Arg_x2, &IV);

    // Populate the actual call to kernel().
    llvm::SmallVector<llvm::Value*, 8> RootArgs;

    llvm::Value *InPtr  = nullptr;
    llvm::Value *OutPtr = nullptr;

    // Calculate the current input and output pointers
    //
    // We always calculate the input/output pointers with a GEP operating on i8
    // values and only cast at the very end to OutTy. This is because the step
    // between two values is given in bytes.
    //
    // TODO: We could further optimize the output by using a GEP operation of
    // type 'OutTy' in cases where the element type of the allocation allows.
    if (OutBasePtr) {
      llvm::Value *OutOffset = Builder.CreateSub(IV, Arg_x1);
      OutOffset = Builder.CreateMul(OutOffset, OutStep);
      OutPtr = Builder.CreateGEP(OutBasePtr, OutOffset);
      OutPtr = Builder.CreatePointerCast(OutPtr, OutTy);
    }

    if (InBasePtr) {
      llvm::Value *InOffset = Builder.CreateSub(IV, Arg_x1);
      InOffset = Builder.CreateMul(InOffset, InStep);
      InPtr = Builder.CreateGEP(InBasePtr, InOffset);
      InPtr = Builder.CreatePointerCast(InPtr, InTy);
    }

    if (InPtr) {
      RootArgs.push_back(InPtr);
    }

    if (OutPtr) {
      RootArgs.push_back(OutPtr);
    }

    if (UsrData) {
      RootArgs.push_back(UsrData);
    }

    llvm::Value *X = IV;
    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      RootArgs.push_back(X);
    }

    if (Y) {
      RootArgs.push_back(Y);
    }

    Builder.CreateCall(Function, RootArgs);

    return true;
  }

  /* Expand a pass-by-value kernel.
   */
  bool ExpandKernel(llvm::Function *Function, uint32_t Signature) {
    bccAssert(bcinfo::MetadataExtractor::hasForEachSignatureKernel(Signature));
    ALOGV("Expanding kernel Function %s", Function->getName().str().c_str());

    // TODO: Refactor this to share functionality with ExpandFunction.
    llvm::DataLayout DL(Module);

    llvm::Function *ExpandedFunction =
      createEmptyExpandedFunction(Function->getName());

    /*
     * Extract the expanded function's parameters.  It is guaranteed by
     * createEmptyExpandedFunction that there will be four parameters.
     */

    bccAssert(ExpandedFunction->arg_size() == NUM_EXPANDED_FUNCTION_PARAMS);

    llvm::Function::arg_iterator ExpandedFunctionArgIter =
      ExpandedFunction->arg_begin();

    llvm::Value *Arg_p       = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x1      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_x2      = &*(ExpandedFunctionArgIter++);
    llvm::Value *Arg_outstep = &*(ExpandedFunctionArgIter);

    // Construct the actual function body.
    llvm::IRBuilder<> Builder(ExpandedFunction->getEntryBlock().begin());

    // Create TBAA meta-data.
    llvm::MDNode *TBAARenderScript, *TBAAAllocation, *TBAAPointer;
    llvm::MDBuilder MDHelper(*Context);

    TBAARenderScript = MDHelper.createTBAARoot("RenderScript TBAA");
    TBAAAllocation = MDHelper.createTBAAScalarTypeNode("allocation",
                                                       TBAARenderScript);
    TBAAAllocation = MDHelper.createTBAAStructTagNode(TBAAAllocation,
                                                      TBAAAllocation, 0);
    TBAAPointer = MDHelper.createTBAAScalarTypeNode("pointer",
                                                    TBAARenderScript);
    TBAAPointer = MDHelper.createTBAAStructTagNode(TBAAPointer, TBAAPointer, 0);

    /*
     * Collect and construct the arguments for the kernel().
     *
     * Note that we load any loop-invariant arguments before entering the Loop.
     */
    size_t NumInputs = Function->arg_size();

    llvm::Value *Y = nullptr;
    if (bcinfo::MetadataExtractor::hasForEachSignatureY(Signature)) {
      Y = Builder.CreateLoad(
            Builder.CreateStructGEP(Arg_p, PARAM_FIELD_Y), "Y");

      --NumInputs;
    }

    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      --NumInputs;
    }

    // No usrData parameter on kernels.
    bccAssert(
        !bcinfo::MetadataExtractor::hasForEachSignatureUsrData(Signature));

    llvm::Function::arg_iterator ArgIter = Function->arg_begin();

    // Check the return type
    llvm::Type     *OutTy            = nullptr;
    llvm::Value    *OutStep          = nullptr;
    llvm::LoadInst *OutBasePtr       = nullptr;
    llvm::Value    *CastedOutBasePtr = nullptr;

    bool PassOutByPointer = false;

    if (bcinfo::MetadataExtractor::hasForEachSignatureOut(Signature)) {
      llvm::Type *OutBaseTy = Function->getReturnType();

      if (OutBaseTy->isVoidTy()) {
        PassOutByPointer = true;
        OutTy = ArgIter->getType();

        ArgIter++;
        --NumInputs;
      } else {
        // We don't increment ArgIter, since we are using the actual return type.
        OutTy = OutBaseTy->getPointerTo();
      }

      OutStep = getStepValue(&DL, OutTy, Arg_outstep);
      OutStep->setName("outstep");
      OutBasePtr = Builder.CreateLoad(
                     Builder.CreateStructGEP(Arg_p, PARAM_FIELD_OUT));

      if (gEnableRsTbaa) {
        OutBasePtr->setMetadata("tbaa", TBAAPointer);
      }
      CastedOutBasePtr = Builder.CreatePointerCast(OutBasePtr, OutTy, "casted_out");
    }

    llvm::SmallVector<llvm::Type*,  8> InTypes;
    llvm::SmallVector<llvm::Value*, 8> InSteps;
    llvm::SmallVector<llvm::Value*, 8> InBasePtrs;
    llvm::SmallVector<bool,         8> InIsStructPointer;

    if (NumInputs > 0) {
      llvm::Value *InsMember = Builder.CreateStructGEP(Arg_p, PARAM_FIELD_INS);
      llvm::LoadInst *InsBasePtr = Builder.CreateLoad(InsMember, "inputs_base");

      llvm::Value *InStepsMember =
        Builder.CreateStructGEP(Arg_p, PARAM_FIELD_INESTRIDES);
      llvm::LoadInst *InStepsBase = Builder.CreateLoad(InStepsMember,
                                                         "insteps_base");

      for (size_t InputIndex = 0; InputIndex < NumInputs;
           ++InputIndex, ArgIter++) {

          llvm::Value *IndexVal = Builder.getInt32(InputIndex);

          llvm::Value    *InStepAddr = Builder.CreateGEP(InStepsBase, IndexVal);
          llvm::LoadInst *InStepArg  = Builder.CreateLoad(InStepAddr,
                                                          "instep_addr");

          llvm::Type *InType = ArgIter->getType();

          /*
           * The AArch64 calling convention dictates that structs of
           * sufficient size get passed by pointer instead of passed by
           * value.  This, combined with the fact that we don't allow kernels
           * to operate on pointer data, means that if we see a kernel with a
           * pointer parameter we know that it is a struct input that has
           * been promoted.  As such, we don't need to convert its type to a
           * pointer.  Later we will need to know to avoid a load, so we save
           * this information in InIsStructPointer.
           */
          if (!InType->isPointerTy()) {
            InType = InType->getPointerTo();
            InIsStructPointer.push_back(false);
          } else {
            InIsStructPointer.push_back(true);
          }

          llvm::Value *InStep = getStepValue(&DL, InType, InStepArg);

          InStep->setName("instep");

          llvm::Value    *InputAddr = Builder.CreateGEP(InsBasePtr, IndexVal);
          llvm::LoadInst *InBasePtr = Builder.CreateLoad(InputAddr,
                                                         "input_base");
          llvm::Value    *CastInBasePtr = Builder.CreatePointerCast(InBasePtr,
                                                                    InType, "casted_in");
          if (gEnableRsTbaa) {
            InBasePtr->setMetadata("tbaa", TBAAPointer);
          }

          InTypes.push_back(InType);
          InSteps.push_back(InStep);
          InBasePtrs.push_back(CastInBasePtr);
      }
    }

    llvm::PHINode *IV;
    createLoop(Builder, Arg_x1, Arg_x2, &IV);

    // Populate the actual call to kernel().
    llvm::SmallVector<llvm::Value*, 8> RootArgs;

    // Calculate the current input and output pointers.
    //
    // The input and output base pointers have already been cast to pointers
    // to the kernel's element types ("casted_in" / "casted_out" above), so
    // the pointer for the current cell is obtained with a GEP indexed by the
    // loop offset (IV - x1).  The element size is therefore implied by the
    // pointee type rather than applied through an explicit byte-step
    // multiplication.

    // Output

    llvm::Value *OutPtr = nullptr;
    if (CastedOutBasePtr) {
      llvm::Value *OutOffset = Builder.CreateSub(IV, Arg_x1);

      OutPtr    = Builder.CreateGEP(CastedOutBasePtr, OutOffset);

      if (PassOutByPointer) {
        RootArgs.push_back(OutPtr);
      }
    }

    // Inputs

    if (NumInputs > 0) {
      llvm::Value *Offset = Builder.CreateSub(IV, Arg_x1);

      for (size_t Index = 0; Index < NumInputs; ++Index) {
        llvm::Value *InPtr    = Builder.CreateGEP(InBasePtrs[Index], Offset);
        llvm::Value *Input;

        if (InIsStructPointer[Index]) {
          Input = InPtr;

        } else {
          llvm::LoadInst *InputLoad = Builder.CreateLoad(InPtr, "input");

          if (gEnableRsTbaa) {
            InputLoad->setMetadata("tbaa", TBAAAllocation);
          }

          Input = InputLoad;
        }

        RootArgs.push_back(Input);
      }
    }

    llvm::Value *X = IV;
    if (bcinfo::MetadataExtractor::hasForEachSignatureX(Signature)) {
      RootArgs.push_back(X);
    }

    if (Y) {
      RootArgs.push_back(Y);
    }

    llvm::Value *RetVal = Builder.CreateCall(Function, RootArgs);

    if (OutPtr && !PassOutByPointer) {
      llvm::StoreInst *Store = Builder.CreateStore(RetVal, OutPtr);
      if (gEnableRsTbaa) {
        Store->setMetadata("tbaa", TBAAAllocation);
      }
    }

    return true;
  }

  /// @brief Checks if pointers to allocation internals are exposed
  ///
  /// This function verifies whether, through the parameters passed to the
  /// kernel or through calls to the runtime library, the script gains access
  /// to pointers into the data of a RenderScript Allocation.
  /// Only if we know that we control all loads from and stores to data within
  /// RenderScript allocations, and that the runtime's internal accesses are
  /// all annotated with RenderScript TBAA metadata, can we safely use TBAA to
  /// distinguish between generic and from-allocation pointers.
  bool allocPointersExposed(llvm::Module &Module) {
    // Old-style kernel functions can expose pointers to elements within
    // allocations.
    // TODO: Extend analysis to allow simple cases of old-style kernels.
    for (size_t i = 0; i < mExportForEachCount; ++i) {
      const char *Name = mExportForEachNameList[i];
      uint32_t Signature = mExportForEachSignatureList[i];
      if (Module.getFunction(Name) &&
          !bcinfo::MetadataExtractor::hasForEachSignatureKernel(Signature)) {
        return true;
      }
    }

    // Check for library functions that expose a pointer to an Allocation or
    // that are not yet annotated with RenderScript-specific tbaa information.
    static std::vector<std::string> Funcs;

    // rsGetElementAt(...)
    Funcs.push_back("_Z14rsGetElementAt13rs_allocationj");
    Funcs.push_back("_Z14rsGetElementAt13rs_allocationjj");
    Funcs.push_back("_Z14rsGetElementAt13rs_allocationjjj");
    // rsSetElementAt()
    Funcs.push_back("_Z14rsSetElementAt13rs_allocationPvj");
    Funcs.push_back("_Z14rsSetElementAt13rs_allocationPvjj");
    Funcs.push_back("_Z14rsSetElementAt13rs_allocationPvjjj");
    // rsGetElementAtYuv_uchar_Y()
    Funcs.push_back("_Z25rsGetElementAtYuv_uchar_Y13rs_allocationjj");
    // rsGetElementAtYuv_uchar_U()
    Funcs.push_back("_Z25rsGetElementAtYuv_uchar_U13rs_allocationjj");
    // rsGetElementAtYuv_uchar_V()
    Funcs.push_back("_Z25rsGetElementAtYuv_uchar_V13rs_allocationjj");

    for (std::vector<std::string>::iterator FI = Funcs.begin(),
                                            FE = Funcs.end();
         FI != FE; ++FI) {
      llvm::Function *Function = Module.getFunction(*FI);

      if (!Function) {
        ALOGE("Missing run-time function '%s'", FI->c_str());
        return true;
      }

      if (Function->getNumUses() > 0) {
        return true;
      }
    }

    return false;
  }

  /// @brief Connect RenderScript TBAA metadata to C/C++ metadata
  ///
  /// The TBAA metadata used to annotate loads/stores from RenderScript
  /// Allocations is generated in a separate TBAA tree with a
  /// "RenderScript TBAA" root node. LLVM assumes may-alias for nodes that
  /// live in unrelated alias analysis trees. This function makes the
  /// RenderScript TBAA tree a subtree of the normal C/C++ TBAA tree,
  /// alongside the normal C/C++ types. With the connected trees, accesses to
  /// Allocations can be disambiguated from normal C/C++ accesses.
  void connectRenderScriptTBAAMetadata(llvm::Module &Module) {
    llvm::MDBuilder MDHelper(*Context);
    llvm::MDNode *TBAARenderScript =
      MDHelper.createTBAARoot("RenderScript TBAA");

    llvm::MDNode *TBAARoot     = MDHelper.createTBAARoot("Simple C/C++ TBAA");
    llvm::MDNode *TBAAMergedRS = MDHelper.createTBAANode("RenderScript",
                                                         TBAARoot);

    TBAARenderScript->replaceAllUsesWith(TBAAMergedRS);
  }
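
  /* Illustrative sketch (added for clarity, not emitted verbatim) of the
   * metadata shape after the merge: every use of the standalone
   * "RenderScript TBAA" root is replaced, so the RenderScript nodes hang off
   * the C/C++ root instead:
   *
   *   "Simple C/C++ TBAA"
   *     +-- "RenderScript"
   *           +-- "allocation"  (tags on loads/stores of cell data)
   *           +-- "pointer"     (tags on loads of base pointers from the stub)
   *
   * This lets type-based alias analysis tell Allocation accesses apart from
   * ordinary C/C++ accesses; runOnModule() only performs the merge when
   * allocPointersExposed() reports that no raw Allocation pointers escape to
   * the script.
   */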

  virtual bool runOnModule(llvm::Module &Module) {
    bool Changed  = false;
    this->Module  = &Module;
    this->Context = &Module.getContext();

    this->buildTypes();

    bcinfo::MetadataExtractor me(&Module);
    if (!me.extract()) {
      ALOGE("Could not extract metadata from module!");
      return false;
    }
    mExportForEachCount = me.getExportForEachSignatureCount();
    mExportForEachNameList = me.getExportForEachNameList();
    mExportForEachSignatureList = me.getExportForEachSignatureList();

    bool AllocsExposed = allocPointersExposed(Module);

    for (size_t i = 0; i < mExportForEachCount; ++i) {
      const char *name = mExportForEachNameList[i];
      uint32_t signature = mExportForEachSignatureList[i];
      llvm::Function *kernel = Module.getFunction(name);
      if (kernel) {
        if (bcinfo::MetadataExtractor::hasForEachSignatureKernel(signature)) {
          Changed |= ExpandKernel(kernel, signature);
          kernel->setLinkage(llvm::GlobalValue::InternalLinkage);
        } else if (kernel->getReturnType()->isVoidTy()) {
          Changed |= ExpandFunction(kernel, signature);
          kernel->setLinkage(llvm::GlobalValue::InternalLinkage);
        } else {
          // There are some graphics root functions that are not
          // expanded, but that will be called directly. For those
          // functions, we can not set the linkage to internal.
        }
      }
    }

    if (gEnableRsTbaa && !AllocsExposed) {
      connectRenderScriptTBAAMetadata(Module);
    }

    return Changed;
  }

  virtual const char *getPassName() const {
    return "ForEach-able Function Expansion";
  }

}; // end RSForEachExpandPass

} // end anonymous namespace

char RSForEachExpandPass::ID = 0;

namespace bcc {

llvm::ModulePass *
createRSForEachExpandPass(bool pEnableStepOpt) {
  return new RSForEachExpandPass(pEnableStepOpt);
}

} // end namespace bcc

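/* Usage sketch (illustrative only; the real pass-manager wiring lives in the
 * libbcc compiler driver, not in this file):
 *
 *   llvm::PassManager PM;                          // legacy pass manager
 *   PM.add(bcc::createRSForEachExpandPass(true));  // true = enable step opt
 *   PM.run(TheModule);  // adds "<name>.expand" wrappers to TheModule
 */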