// rsCpuScriptGroup2.cpp — revision 8237638f87ca0e265d050fbb13725b41a795fe5f
1#include "rsCpuScriptGroup2.h" 2 3#include <dlfcn.h> 4#include <stdio.h> 5#include <stdlib.h> 6#include <unistd.h> 7 8#include <set> 9#include <sstream> 10#include <string> 11#include <vector> 12 13#ifndef RS_COMPATIBILITY_LIB 14#include "bcc/Config/Config.h" 15#endif 16 17#include "cpu_ref/rsCpuCore.h" 18#include "rsClosure.h" 19#include "rsContext.h" 20#include "rsCpuCore.h" 21#include "rsCpuExecutable.h" 22#include "rsCpuScript.h" 23#include "rsScript.h" 24#include "rsScriptGroup2.h" 25#include "rsScriptIntrinsic.h" 26 27using std::string; 28using std::vector; 29 30namespace android { 31namespace renderscript { 32 33namespace { 34 35const size_t DefaultKernelArgCount = 2; 36 37void groupRoot(const RsExpandKernelDriverInfo *kinfo, uint32_t xstart, 38 uint32_t xend, uint32_t outstep) { 39 const List<CPUClosure*>& closures = *(List<CPUClosure*>*)kinfo->usr; 40 RsExpandKernelDriverInfo *mutable_kinfo = const_cast<RsExpandKernelDriverInfo *>(kinfo); 41 42 const size_t oldInLen = mutable_kinfo->inLen; 43 44 decltype(mutable_kinfo->inStride) oldInStride; 45 memcpy(&oldInStride, &mutable_kinfo->inStride, sizeof(oldInStride)); 46 47 for (CPUClosure* cpuClosure : closures) { 48 const Closure* closure = cpuClosure->mClosure; 49 50 // There had better be enough space in mutable_kinfo 51 rsAssert(closure->mNumArg <= RS_KERNEL_INPUT_LIMIT); 52 53 for (size_t i = 0; i < closure->mNumArg; i++) { 54 const void* arg = closure->mArgs[i]; 55 const Allocation* a = (const Allocation*)arg; 56 const uint32_t eStride = a->mHal.state.elementSizeBytes; 57 const uint8_t* ptr = (uint8_t*)(a->mHal.drvState.lod[0].mallocPtr) + 58 eStride * xstart; 59 if (kinfo->dim.y > 1) { 60 ptr += a->mHal.drvState.lod[0].stride * kinfo->current.y; 61 } 62 mutable_kinfo->inPtr[i] = ptr; 63 mutable_kinfo->inStride[i] = eStride; 64 } 65 mutable_kinfo->inLen = closure->mNumArg; 66 67 const Allocation* out = closure->mReturnValue; 68 const uint32_t ostep = out->mHal.state.elementSizeBytes; 69 const uint8_t* 
ptr = (uint8_t *)(out->mHal.drvState.lod[0].mallocPtr) + 70 ostep * xstart; 71 if (kinfo->dim.y > 1) { 72 ptr += out->mHal.drvState.lod[0].stride * kinfo->current.y; 73 } 74 75 rsAssert(kinfo->outLen <= 1); 76 mutable_kinfo->outPtr[0] = const_cast<uint8_t*>(ptr); 77 78 cpuClosure->mFunc(kinfo, xstart, xend, ostep); 79 } 80 81 mutable_kinfo->inLen = oldInLen; 82 memcpy(&mutable_kinfo->inStride, &oldInStride, sizeof(oldInStride)); 83} 84 85} // namespace 86 87Batch::Batch(CpuScriptGroup2Impl* group, const char* name) : 88 mGroup(group), mFunc(nullptr) { 89 mName = strndup(name, strlen(name)); 90} 91 92Batch::~Batch() { 93 for (CPUClosure* c : mClosures) { 94 delete c; 95 } 96 free(mName); 97} 98 99bool Batch::conflict(CPUClosure* cpuClosure) const { 100 if (mClosures.empty()) { 101 return false; 102 } 103 104 const Closure* closure = cpuClosure->mClosure; 105 106 if (!closure->mIsKernel || !mClosures.front()->mClosure->mIsKernel) { 107 // An invoke should be in a batch by itself, so it conflicts with any other 108 // closure. 
109 return true; 110 } 111 112 const auto& globalDeps = closure->mGlobalDeps; 113 const auto& argDeps = closure->mArgDeps; 114 115 for (CPUClosure* c : mClosures) { 116 const Closure* batched = c->mClosure; 117 if (globalDeps.find(batched) != globalDeps.end()) { 118 return true; 119 } 120 const auto& it = argDeps.find(batched); 121 if (it != argDeps.end()) { 122 const auto& args = (*it).second; 123 for (const auto &p1 : *args) { 124 if (p1.second.get() != nullptr) { 125 return true; 126 } 127 } 128 } 129 } 130 131 return false; 132} 133 134CpuScriptGroup2Impl::CpuScriptGroup2Impl(RsdCpuReferenceImpl *cpuRefImpl, 135 const ScriptGroupBase *sg) : 136 mCpuRefImpl(cpuRefImpl), mGroup((const ScriptGroup2*)(sg)), 137 mExecutable(nullptr), mScriptObj(nullptr) { 138 rsAssert(!mGroup->mClosures.empty()); 139 140 Batch* batch = new Batch(this, "Batch0"); 141 int i = 0; 142 for (Closure* closure: mGroup->mClosures) { 143 CPUClosure* cc; 144 const IDBase* funcID = closure->mFunctionID.get(); 145 RsdCpuScriptImpl* si = 146 (RsdCpuScriptImpl *)mCpuRefImpl->lookupScript(funcID->mScript); 147 if (closure->mIsKernel) { 148 MTLaunchStruct mtls; 149 si->forEachKernelSetup(funcID->mSlot, &mtls); 150 cc = new CPUClosure(closure, si, (ExpandFuncTy)mtls.kernel); 151 } else { 152 cc = new CPUClosure(closure, si); 153 } 154 155 if (batch->conflict(cc)) { 156 mBatches.push_back(batch); 157 std::stringstream ss; 158 ss << "Batch" << ++i; 159 batch = new Batch(this, ss.str().c_str()); 160 } 161 162 batch->mClosures.push_back(cc); 163 } 164 165 rsAssert(!batch->mClosures.empty()); 166 mBatches.push_back(batch); 167 168#ifndef RS_COMPATIBILITY_LIB 169 compile(mGroup->mCacheDir); 170 if (mScriptObj != nullptr && mExecutable != nullptr) { 171 for (Batch* batch : mBatches) { 172 batch->resolveFuncPtr(mScriptObj); 173 } 174 } 175#endif // RS_COMPATIBILITY_LIB 176} 177 178void Batch::resolveFuncPtr(void* sharedObj) { 179 std::string funcName(mName); 180 if (mClosures.front()->mClosure->mIsKernel) { 
181 funcName.append(".expand"); 182 } 183 mFunc = dlsym(sharedObj, funcName.c_str()); 184 rsAssert (mFunc != nullptr); 185} 186 187CpuScriptGroup2Impl::~CpuScriptGroup2Impl() { 188 for (Batch* batch : mBatches) { 189 delete batch; 190 } 191 delete mExecutable; 192 // TODO: move this dlclose into ~ScriptExecutable(). 193 if (mScriptObj != nullptr) { 194 dlclose(mScriptObj); 195 } 196} 197 198namespace { 199 200#ifndef RS_COMPATIBILITY_LIB 201 202string getCoreLibPath(Context* context, string* coreLibRelaxedPath) { 203 *coreLibRelaxedPath = ""; 204 205 // If we're debugging, use the debug library. 206 if (context->getContextType() == RS_CONTEXT_TYPE_DEBUG) { 207 return SYSLIBPATH"/libclcore_debug.bc"; 208 } 209 210 // Check for a platform specific library 211 212#if defined(ARCH_ARM_HAVE_NEON) && !defined(DISABLE_CLCORE_NEON) 213 // NEON-capable ARMv7a devices can use an accelerated math library 214 // for all reduced precision scripts. 215 // ARMv8 does not use NEON, as ASIMD can be used with all precision 216 // levels. 217 *coreLibRelaxedPath = SYSLIBPATH"/libclcore_neon.bc"; 218#endif 219 220#if defined(__i386__) || defined(__x86_64__) 221 // x86 devices will use an optimized library. 
222 return SYSLIBPATH"/libclcore_x86.bc"; 223#else 224 return SYSLIBPATH"/libclcore.bc"; 225#endif 226} 227 228void setupCompileArguments( 229 const vector<const char*>& inputs, const vector<string>& kernelBatches, 230 const vector<string>& invokeBatches, 231 const char* outputDir, const char* outputFileName, 232 const char* coreLibPath, const char* coreLibRelaxedPath, 233 const bool emitGlobalInfo, const bool emitGlobalInfoSkipConstant, 234 vector<const char*>* args) { 235 args->push_back(RsdCpuScriptImpl::BCC_EXE_PATH); 236 args->push_back("-fPIC"); 237 args->push_back("-embedRSInfo"); 238 if (emitGlobalInfo) { 239 args->push_back("-rs-global-info"); 240 if (emitGlobalInfoSkipConstant) { 241 args->push_back("-rs-global-info-skip-constant"); 242 } 243 } 244 args->push_back("-mtriple"); 245 args->push_back(DEFAULT_TARGET_TRIPLE_STRING); 246 args->push_back("-bclib"); 247 args->push_back(coreLibPath); 248 args->push_back("-bclib_relaxed"); 249 args->push_back(coreLibRelaxedPath); 250 for (const char* input : inputs) { 251 args->push_back(input); 252 } 253 for (const string& batch : kernelBatches) { 254 args->push_back("-merge"); 255 args->push_back(batch.c_str()); 256 } 257 for (const string& batch : invokeBatches) { 258 args->push_back("-invoke"); 259 args->push_back(batch.c_str()); 260 } 261 args->push_back("-output_path"); 262 args->push_back(outputDir); 263 args->push_back("-o"); 264 args->push_back(outputFileName); 265} 266 267void generateSourceSlot(const Closure& closure, 268 const std::vector<const char*>& inputs, 269 std::stringstream& ss) { 270 const IDBase* funcID = (const IDBase*)closure.mFunctionID.get(); 271 const Script* script = funcID->mScript; 272 273 rsAssert (!script->isIntrinsic()); 274 275 const RsdCpuScriptImpl *cpuScript = 276 (const RsdCpuScriptImpl*)script->mHal.drv; 277 const string& bitcodeFilename = cpuScript->getBitcodeFilePath(); 278 279 const int index = find(inputs.begin(), inputs.end(), bitcodeFilename) - 280 inputs.begin(); 281 282 
ss << index << "," << funcID->mSlot << "."; 283} 284 285#endif // RS_COMPATIBILTY_LIB 286 287} // anonymous namespace 288 289void CpuScriptGroup2Impl::compile(const char* cacheDir) { 290#ifndef RS_COMPATIBILITY_LIB 291 if (mGroup->mClosures.size() < 2) { 292 return; 293 } 294 295 auto comparator = [](const char* str1, const char* str2) -> bool { 296 return strcmp(str1, str2) < 0; 297 }; 298 std::set<const char*, decltype(comparator)> inputSet(comparator); 299 300 for (Closure* closure : mGroup->mClosures) { 301 const Script* script = closure->mFunctionID.get()->mScript; 302 303 // If any script is an intrinsic, give up trying fusing the kernels. 304 if (script->isIntrinsic()) { 305 return; 306 } 307 308 const RsdCpuScriptImpl *cpuScript = 309 (const RsdCpuScriptImpl*)script->mHal.drv; 310 const char* bitcodeFilename = cpuScript->getBitcodeFilePath(); 311 inputSet.insert(bitcodeFilename); 312 } 313 314 std::vector<const char*> inputs(inputSet.begin(), inputSet.end()); 315 316 std::vector<string> kernelBatches; 317 std::vector<string> invokeBatches; 318 319 int i = 0; 320 for (const auto& batch : mBatches) { 321 rsAssert(batch->size() > 0); 322 323 std::stringstream ss; 324 ss << batch->mName << ":"; 325 326 if (!batch->mClosures.front()->mClosure->mIsKernel) { 327 rsAssert(batch->size() == 1); 328 generateSourceSlot(*batch->mClosures.front()->mClosure, inputs, ss); 329 invokeBatches.push_back(ss.str()); 330 } else { 331 for (const auto& cpuClosure : batch->mClosures) { 332 generateSourceSlot(*cpuClosure->mClosure, inputs, ss); 333 } 334 kernelBatches.push_back(ss.str()); 335 } 336 } 337 338 rsAssert(cacheDir != nullptr); 339 string objFilePath(cacheDir); 340 objFilePath.append("/"); 341 objFilePath.append(mGroup->mName); 342 objFilePath.append(".o"); 343 344 const char* resName = mGroup->mName; 345 string coreLibRelaxedPath; 346 const string& coreLibPath = getCoreLibPath(getCpuRefImpl()->getContext(), 347 &coreLibRelaxedPath); 348 349 vector<const char*> arguments; 
350 bool emitGlobalInfo = getCpuRefImpl()->getEmbedGlobalInfo(); 351 bool emitGlobalInfoSkipConstant = getCpuRefImpl()->getEmbedGlobalInfoSkipConstant(); 352 setupCompileArguments(inputs, kernelBatches, invokeBatches, cacheDir, 353 resName, coreLibPath.c_str(), coreLibRelaxedPath.c_str(), 354 emitGlobalInfo, emitGlobalInfoSkipConstant, 355 &arguments); 356 357 std::unique_ptr<const char> cmdLine(rsuJoinStrings(arguments.size() - 1, 358 arguments.data())); 359 360 inputs.push_back(coreLibPath.c_str()); 361 inputs.push_back(coreLibRelaxedPath.c_str()); 362 363 uint32_t checksum = constructBuildChecksum(nullptr, 0, cmdLine.get(), 364 inputs.data(), inputs.size()); 365 366 if (checksum == 0) { 367 return; 368 } 369 370 std::stringstream ss; 371 ss << std::hex << checksum; 372 const char* checksumStr = ss.str().c_str(); 373 374 //===--------------------------------------------------------------------===// 375 // Try to load a shared lib from code cache matching filename and checksum 376 //===--------------------------------------------------------------------===// 377 378 mScriptObj = SharedLibraryUtils::loadSharedLibrary(cacheDir, resName); 379 if (mScriptObj != nullptr) { 380 mExecutable = ScriptExecutable::createFromSharedObject( 381 getCpuRefImpl()->getContext(), mScriptObj, checksum); 382 if (mExecutable != nullptr) { 383 return; 384 } else { 385 ALOGE("Failed to create an executable object from so file"); 386 } 387 dlclose(mScriptObj); 388 mScriptObj = nullptr; 389 } 390 391 //===--------------------------------------------------------------------===// 392 // Fuse the input kernels and generate native code in an object file 393 //===--------------------------------------------------------------------===// 394 395 arguments.push_back("-build-checksum"); 396 arguments.push_back(checksumStr); 397 arguments.push_back(nullptr); 398 399 bool compiled = rsuExecuteCommand(RsdCpuScriptImpl::BCC_EXE_PATH, 400 arguments.size()-1, 401 arguments.data()); 402 if (!compiled) { 
403 return; 404 } 405 406 //===--------------------------------------------------------------------===// 407 // Create and load the shared lib 408 //===--------------------------------------------------------------------===// 409 410 if (!SharedLibraryUtils::createSharedLibrary( 411 getCpuRefImpl()->getContext()->getDriverName(), cacheDir, resName)) { 412 ALOGE("Failed to link object file '%s'", resName); 413 unlink(objFilePath.c_str()); 414 return; 415 } 416 417 unlink(objFilePath.c_str()); 418 419 mScriptObj = SharedLibraryUtils::loadSharedLibrary(cacheDir, resName); 420 if (mScriptObj == nullptr) { 421 ALOGE("Unable to load '%s'", resName); 422 return; 423 } 424 425 mExecutable = ScriptExecutable::createFromSharedObject( 426 getCpuRefImpl()->getContext(), 427 mScriptObj); 428 429#endif // RS_COMPATIBILITY_LIB 430} 431 432void CpuScriptGroup2Impl::execute() { 433 for (auto batch : mBatches) { 434 batch->setGlobalsForBatch(); 435 batch->run(); 436 } 437} 438 439void Batch::setGlobalsForBatch() { 440 for (CPUClosure* cpuClosure : mClosures) { 441 const Closure* closure = cpuClosure->mClosure; 442 const IDBase* funcID = closure->mFunctionID.get(); 443 Script* s = funcID->mScript;; 444 for (const auto& p : closure->mGlobals) { 445 const void* value = p.second.first; 446 int size = p.second.second; 447 if (value == nullptr && size == 0) { 448 // This indicates the current closure depends on another closure for a 449 // global in their shared module (script). In this case we don't need to 450 // copy the value. For example, an invoke intializes a global variable 451 // which a kernel later reads. 
452 continue; 453 } 454 rsAssert(p.first != nullptr); 455 Script* script = p.first->mScript; 456 const RsdCpuScriptImpl *cpuScript = 457 (const RsdCpuScriptImpl*)script->mHal.drv; 458 int slot = p.first->mSlot; 459 ScriptExecutable* exec = mGroup->getExecutable(); 460 if (exec != nullptr) { 461 const char* varName = cpuScript->getFieldName(slot); 462 void* addr = exec->getFieldAddress(varName); 463 if (size < 0) { 464 rsrSetObject(mGroup->getCpuRefImpl()->getContext(), 465 (rs_object_base*)addr, (ObjectBase*)value); 466 } else { 467 memcpy(addr, (const void*)&value, size); 468 } 469 } else { 470 // We use -1 size to indicate an ObjectBase rather than a primitive type 471 if (size < 0) { 472 s->setVarObj(slot, (ObjectBase*)value); 473 } else { 474 s->setVar(slot, (const void*)&value, size); 475 } 476 } 477 } 478 } 479} 480 481void Batch::run() { 482 if (!mClosures.front()->mClosure->mIsKernel) { 483 rsAssert(mClosures.size() == 1); 484 485 // This batch contains a single closure for an invoke function 486 CPUClosure* cc = mClosures.front(); 487 const Closure* c = cc->mClosure; 488 489 if (mFunc != nullptr) { 490 // TODO: Need align pointers for x86_64. 
491 // See RsdCpuScriptImpl::invokeFunction in rsCpuScript.cpp 492 ((InvokeFuncTy)mFunc)(c->mParams, c->mParamLength); 493 } else { 494 const ScriptInvokeID* invokeID = (const ScriptInvokeID*)c->mFunctionID.get(); 495 rsAssert(invokeID != nullptr); 496 cc->mSi->invokeFunction(invokeID->mSlot, c->mParams, c->mParamLength); 497 } 498 499 return; 500 } 501 502 if (mFunc != nullptr) { 503 MTLaunchStruct mtls; 504 const CPUClosure* firstCpuClosure = mClosures.front(); 505 const CPUClosure* lastCpuClosure = mClosures.back(); 506 507 firstCpuClosure->mSi->forEachMtlsSetup( 508 (const Allocation**)firstCpuClosure->mClosure->mArgs, 509 firstCpuClosure->mClosure->mNumArg, 510 lastCpuClosure->mClosure->mReturnValue, 511 nullptr, 0, nullptr, &mtls); 512 513 mtls.script = nullptr; 514 mtls.fep.usr = nullptr; 515 mtls.kernel = (ForEachFunc_t)mFunc; 516 517 mGroup->getCpuRefImpl()->launchThreads( 518 (const Allocation**)firstCpuClosure->mClosure->mArgs, 519 firstCpuClosure->mClosure->mNumArg, 520 lastCpuClosure->mClosure->mReturnValue, 521 nullptr, &mtls); 522 523 return; 524 } 525 526 for (CPUClosure* cpuClosure : mClosures) { 527 const Closure* closure = cpuClosure->mClosure; 528 const ScriptKernelID* kernelID = 529 (const ScriptKernelID*)closure->mFunctionID.get(); 530 cpuClosure->mSi->preLaunch(kernelID->mSlot, 531 (const Allocation**)closure->mArgs, 532 closure->mNumArg, closure->mReturnValue, 533 nullptr, 0, nullptr); 534 } 535 536 const CPUClosure* cpuClosure = mClosures.front(); 537 const Closure* closure = cpuClosure->mClosure; 538 MTLaunchStruct mtls; 539 540 if (cpuClosure->mSi->forEachMtlsSetup((const Allocation**)closure->mArgs, 541 closure->mNumArg, 542 closure->mReturnValue, 543 nullptr, 0, nullptr, &mtls)) { 544 545 mtls.script = nullptr; 546 mtls.kernel = (void (*)())&groupRoot; 547 mtls.fep.usr = &mClosures; 548 549 mGroup->getCpuRefImpl()->launchThreads(nullptr, 0, nullptr, nullptr, &mtls); 550 } 551 552 for (CPUClosure* cpuClosure : mClosures) { 553 const 
Closure* closure = cpuClosure->mClosure; 554 const ScriptKernelID* kernelID = 555 (const ScriptKernelID*)closure->mFunctionID.get(); 556 cpuClosure->mSi->postLaunch(kernelID->mSlot, 557 (const Allocation**)closure->mArgs, 558 closure->mNumArg, closure->mReturnValue, 559 nullptr, 0, nullptr); 560 } 561} 562 563} // namespace renderscript 564} // namespace android 565