rsCpuCore.cpp revision 10adb0c2029f112b5738228617d5645f6ecea0c5
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "rsCpuCore.h"
18#include "rsCpuScript.h"
19#include "rsCpuScriptGroup.h"
20#include "rsCpuScriptGroup2.h"
21
22#include <malloc.h>
23#include "rsContext.h"
24
25#include <sys/types.h>
26#include <sys/resource.h>
27#include <sched.h>
28#include <sys/syscall.h>
29#include <stdio.h>
30#include <string.h>
31#include <unistd.h>
32
33#if !defined(RS_SERVER) && !defined(RS_COMPATIBILITY_LIB)
34#include <cutils/properties.h>
35#include "utils/StopWatch.h"
36#endif
37
#ifdef RS_SERVER
// Android exposes gettid(), standard Linux does not
// (no libc wrapper at this vintage), so issue the raw syscall instead.
static pid_t gettid() {
    return syscall(SYS_gettid);
}
#endif
44
45using namespace android;
46using namespace android::renderscript;
47
// Verbose logging for the new reduce implementation; compiled out by default.
#define REDUCE_NEW_ALOGV(...) /* ALOGV(__VA_ARGS__) */

// Process-global TLS key that hands each thread its ScriptTLSStruct, plus a
// reference count so the last RsdCpuReferenceImpl instance deletes the key.
// gInitMutex guards the key/count pair (see lockMutex()/unlockMutex()).
static pthread_key_t gThreadTLSKey = 0;
static uint32_t gThreadTLSKeyCount = 0;
static pthread_mutex_t gInitMutex = PTHREAD_MUTEX_INITIALIZER;

// Set by GetCpuInfo() when /proc/cpuinfo advertises NEON/ASIMD or SSSE3.
bool android::renderscript::gArchUseSIMD = false;
55
56RsdCpuReference::~RsdCpuReference() {
57}
58
59RsdCpuReference * RsdCpuReference::create(Context *rsc, uint32_t version_major,
60        uint32_t version_minor, sym_lookup_t lfn, script_lookup_t slfn
61        , RSSelectRTCallback pSelectRTCallback,
62        const char *pBccPluginName
63        ) {
64
65    RsdCpuReferenceImpl *cpu = new RsdCpuReferenceImpl(rsc);
66    if (!cpu) {
67        return nullptr;
68    }
69    if (!cpu->init(version_major, version_minor, lfn, slfn)) {
70        delete cpu;
71        return nullptr;
72    }
73
74    cpu->setSelectRTCallback(pSelectRTCallback);
75    if (pBccPluginName) {
76        cpu->setBccPluginName(pBccPluginName);
77    }
78
79    return cpu;
80}
81
82
83Context * RsdCpuReference::getTlsContext() {
84    ScriptTLSStruct * tls = (ScriptTLSStruct *)pthread_getspecific(gThreadTLSKey);
85    return tls->mContext;
86}
87
88const Script * RsdCpuReference::getTlsScript() {
89    ScriptTLSStruct * tls = (ScriptTLSStruct *)pthread_getspecific(gThreadTLSKey);
90    return tls->mScript;
91}
92
93pthread_key_t RsdCpuReference::getThreadTLSKey(){ return gThreadTLSKey; }
94
95////////////////////////////////////////////////////////////
96///
97
// Construct the reference CPU driver for |rsc|.  Only zeroes state here;
// the worker threads themselves are created later by init().
RsdCpuReferenceImpl::RsdCpuReferenceImpl(Context *rsc) {
    mRSC = rsc;

    version_major = 0;
    version_minor = 0;
    mInKernel = false;
    // Zero the aggregate worker-pool and TLS records before init() fills
    // them in.
    memset(&mWorkers, 0, sizeof(mWorkers));
    memset(&mTlsStruct, 0, sizeof(mTlsStruct));
    mExit = false;
    mSelectRTCallback = nullptr;
    // Defaults for the embed-global-info flags (consumed outside this file).
    mEmbedGlobalInfo = true;
    mEmbedGlobalInfoSkipConstant = true;
}
111
112
// Entry point for each pool worker thread.  Claims a unique worker slot,
// binds TLS, then loops: sleep on the launch signal, run the current launch
// callback, report completion.  Exits when mExit is set.
void * RsdCpuReferenceImpl::helperThreadProc(void *vrsc) {
    RsdCpuReferenceImpl *dc = (RsdCpuReferenceImpl *)vrsc;

    // Atomically claim a unique slot in [0, mCount).
    uint32_t idx = __sync_fetch_and_add(&dc->mWorkers.mLaunchCount, 1);

    //ALOGV("RS helperThread starting %p idx=%i", dc, idx);

    dc->mWorkers.mLaunchSignals[idx].init();
    // Kernel thread id, recorded so setPriority() can renice this worker.
    dc->mWorkers.mNativeThreadId[idx] = gettid();

    // NOTE(review): every worker points its TLS at the single shared
    // dc->mTlsStruct (and re-zeroes it here) -- confirm this sharing is
    // intended rather than a per-thread struct.
    memset(&dc->mTlsStruct, 0, sizeof(dc->mTlsStruct));
    int status = pthread_setspecific(gThreadTLSKey, &dc->mTlsStruct);
    if (status) {
        ALOGE("pthread_setspecific %i", status);
    }

#if 0
    // Disabled experiment: pin each worker to its own CPU via a raw
    // sched_setaffinity syscall.
    typedef struct {uint64_t bits[1024 / 64]; } cpu_set_t;
    cpu_set_t cpuset;
    memset(&cpuset, 0, sizeof(cpuset));
    cpuset.bits[idx / 64] |= 1ULL << (idx % 64);
    int ret = syscall(241, rsc->mWorkers.mNativeThreadId[idx],
              sizeof(cpuset), &cpuset);
    ALOGE("SETAFFINITY ret = %i %s", ret, EGLUtils::strerror(ret));
#endif

    while (!dc->mExit) {
        // Sleep until launchThreads() (or the destructor) kicks this worker.
        dc->mWorkers.mLaunchSignals[idx].wait();
        if (dc->mWorkers.mLaunchCallback) {
           // idx +1 is used because the calling thread is always worker 0.
           dc->mWorkers.mLaunchCallback(dc->mWorkers.mLaunchData, idx+1);
        }
        // Report completion to the dispatching thread.
        __sync_fetch_and_sub(&dc->mWorkers.mRunningCount, 1);
        dc->mWorkers.mCompleteSignal.set();
    }

    //ALOGV("RS helperThread exited %p idx=%i", dc, idx);
    return nullptr;
}
152
// Launch a kernel.
// The callback function is called to execute the kernel.
// The calling thread always participates as worker 0; pool threads are only
// woken when the launch is large enough to be worth splitting.
void RsdCpuReferenceImpl::launchThreads(WorkerCallback_t cbk, void *data) {
    mWorkers.mLaunchData = data;
    mWorkers.mLaunchCallback = cbk;

    // fast path for very small launches: a single-row launch that fits in
    // one slice runs entirely on the calling thread, skipping all signaling.
    MTLaunchStructCommon *mtls = (MTLaunchStructCommon *)data;
    if (mtls && mtls->dimPtr->y <= 1 && mtls->end.x <= mtls->start.x + mtls->mSliceSize) {
        if (mWorkers.mLaunchCallback) {
            mWorkers.mLaunchCallback(mWorkers.mLaunchData, 0);
        }
        return;
    }

    // Arm the completion counter before waking anyone, so no worker can
    // decrement it below zero.
    mWorkers.mRunningCount = mWorkers.mCount;
    __sync_synchronize();

    // Wake every pool worker.
    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        mWorkers.mLaunchSignals[ct].set();
    }

    // We use the calling thread as one of the workers so we can start without
    // the delay of the thread wakeup.
    if (mWorkers.mLaunchCallback) {
        mWorkers.mLaunchCallback(mWorkers.mLaunchData, 0);
    }

    // Block until every pool worker has finished and decremented the count.
    while (__sync_fetch_and_or(&mWorkers.mRunningCount, 0) != 0) {
        mWorkers.mCompleteSignal.wait();
    }
}
185
186
// Serialize access to the process-global TLS key bookkeeping
// (gThreadTLSKey / gThreadTLSKeyCount).
void RsdCpuReferenceImpl::lockMutex() {
    pthread_mutex_lock(&gInitMutex);
}
190
// Release the global init mutex taken by lockMutex().
void RsdCpuReferenceImpl::unlockMutex() {
    pthread_mutex_unlock(&gInitMutex);
}
194
195// Determine if the CPU we're running on supports SIMD instructions.
196static void GetCpuInfo() {
197    // Read the CPU flags from /proc/cpuinfo.
198    FILE *cpuinfo = fopen("/proc/cpuinfo", "r");
199
200    if (!cpuinfo) {
201        return;
202    }
203
204    char cpuinfostr[4096];
205    // fgets() ends with newline or EOF, need to check the whole
206    // "cpuinfo" file to make sure we can use SIMD or not.
207    while (fgets(cpuinfostr, sizeof(cpuinfostr), cpuinfo)) {
208#if defined(ARCH_ARM_HAVE_VFP) || defined(ARCH_ARM_USE_INTRINSICS)
209        gArchUseSIMD = strstr(cpuinfostr, " neon") || strstr(cpuinfostr, " asimd");
210#elif defined(ARCH_X86_HAVE_SSSE3)
211        gArchUseSIMD = strstr(cpuinfostr, " ssse3");
212#endif
213        if (gArchUseSIMD) {
214            break;
215        }
216    }
217    fclose(cpuinfo);
218}
219
// One-time per-instance initialization: registers the process-global TLS
// key, probes the CPU, and spins up the worker pool (CPU count - 1 threads;
// the command thread doubles as worker 0).  Returns false on failure.
bool RsdCpuReferenceImpl::init(uint32_t version_major, uint32_t version_minor,
                               sym_lookup_t lfn, script_lookup_t slfn) {
    mSymLookupFn = lfn;
    mScriptLookupFn = slfn;

    // The TLS key is shared across all contexts and reference counted.
    lockMutex();
    if (!gThreadTLSKeyCount) {
        int status = pthread_key_create(&gThreadTLSKey, nullptr);
        if (status) {
            ALOGE("Failed to init thread tls key.");
            unlockMutex();
            return false;
        }
    }
    gThreadTLSKeyCount++;
    unlockMutex();

    // Bind the calling (command) thread's TLS record.
    mTlsStruct.mContext = mRSC;
    mTlsStruct.mScript = nullptr;
    int status = pthread_setspecific(gThreadTLSKey, &mTlsStruct);
    if (status) {
        ALOGE("pthread_setspecific %i", status);
    }

    // Page size is used by launchReduceNewParallel() to pad accumulators.
    mPageSize = sysconf(_SC_PAGE_SIZE);
    REDUCE_NEW_ALOGV("page size = %ld", mPageSize);

    GetCpuInfo();

    int cpu = sysconf(_SC_NPROCESSORS_CONF);
    if(mRSC->props.mDebugMaxThreads) {
        cpu = mRSC->props.mDebugMaxThreads;
    }
    if (cpu < 2) {
        // Single core (or debug-capped): run everything on the command thread.
        mWorkers.mCount = 0;
        return true;
    }

    // Subtract one from the cpu count because we also use the command thread as a worker.
    mWorkers.mCount = (uint32_t)(cpu - 1);

    ALOGV("%p Launching thread(s), CPUs %i", mRSC, mWorkers.mCount + 1);

    mWorkers.mThreadId = (pthread_t *) calloc(mWorkers.mCount, sizeof(pthread_t));
    mWorkers.mNativeThreadId = (pid_t *) calloc(mWorkers.mCount, sizeof(pid_t));
    mWorkers.mLaunchSignals = new Signal[mWorkers.mCount];
    mWorkers.mLaunchCallback = nullptr;

    mWorkers.mCompleteSignal.init();

    // The spin loop below waits for mRunningCount to return to zero, i.e.
    // for every worker to have completed one pass of its loop.
    // NOTE(review): this startup handshake relies on each worker's first
    // Signal::wait() returning promptly -- confirm against rsSignal.cpp.
    mWorkers.mRunningCount = mWorkers.mCount;
    mWorkers.mLaunchCount = 0;
    __sync_synchronize();

    pthread_attr_t threadAttr;
    status = pthread_attr_init(&threadAttr);
    if (status) {
        ALOGE("Failed to init thread attribute.");
        return false;
    }

    for (uint32_t ct=0; ct < mWorkers.mCount; ct++) {
        status = pthread_create(&mWorkers.mThreadId[ct], &threadAttr, helperThreadProc, this);
        if (status) {
            // Keep going with however many threads were actually created.
            // NOTE(review): mRunningCount was armed with the original count;
            // verify the spin below cannot stall on this error path.
            mWorkers.mCount = ct;
            ALOGE("Created fewer than expected number of RS threads.");
            break;
        }
    }
    // Wait until every worker has checked in.
    while (__sync_fetch_and_or(&mWorkers.mRunningCount, 0) != 0) {
        usleep(100);
    }

    pthread_attr_destroy(&threadAttr);
    return true;
}
296
297
298void RsdCpuReferenceImpl::setPriority(int32_t priority) {
299    for (uint32_t ct=0; ct < mWorkers.mCount; ct++) {
300        setpriority(PRIO_PROCESS, mWorkers.mNativeThreadId[ct], priority);
301    }
302}
303
// Tear down the worker pool and release the process-global TLS key if this
// was the last instance using it.
RsdCpuReferenceImpl::~RsdCpuReferenceImpl() {
    mExit = true;
    // Clear the callback, then wake every worker: with mExit set and no
    // callback, each worker decrements mRunningCount and leaves its loop.
    mWorkers.mLaunchData = nullptr;
    mWorkers.mLaunchCallback = nullptr;
    mWorkers.mRunningCount = mWorkers.mCount;
    __sync_synchronize();
    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        mWorkers.mLaunchSignals[ct].set();
    }
    void *res;
    // Reap every worker thread before freeing the structures they touch.
    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        pthread_join(mWorkers.mThreadId[ct], &res);
    }
    // Every worker decremented mRunningCount exactly once on its way out.
    rsAssert(__sync_fetch_and_or(&mWorkers.mRunningCount, 0) == 0);
    free(mWorkers.mThreadId);
    free(mWorkers.mNativeThreadId);
    delete[] mWorkers.mLaunchSignals;

    // Global structure cleanup.
    lockMutex();
    --gThreadTLSKeyCount;
    if (!gThreadTLSKeyCount) {
        pthread_key_delete(gThreadTLSKey);
    }
    unlockMutex();

}
331
332// Set up the appropriate input and output pointers to the kernel driver info structure.
333// Inputs:
334//   mtls - The MTLaunchStruct holding information about the kernel launch
335//   fep - The forEach parameters (driver info structure)
336//   x, y, z, lod, face, a1, a2, a3, a4 - The start offsets into each dimension
337static inline void FepPtrSetup(const MTLaunchStructForEach *mtls, RsExpandKernelDriverInfo *fep,
338                               uint32_t x, uint32_t y,
339                               uint32_t z = 0, uint32_t lod = 0,
340                               RsAllocationCubemapFace face = RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X,
341                               uint32_t a1 = 0, uint32_t a2 = 0, uint32_t a3 = 0, uint32_t a4 = 0) {
342    for (uint32_t i = 0; i < fep->inLen; i++) {
343        fep->inPtr[i] = (const uint8_t *)mtls->ains[i]->getPointerUnchecked(x, y, z, lod, face, a1, a2, a3, a4);
344    }
345    if (mtls->aout[0] != nullptr) {
346        fep->outPtr[0] = (uint8_t *)mtls->aout[0]->getPointerUnchecked(x, y, z, lod, face, a1, a2, a3, a4);
347    }
348}
349
350// Set up the appropriate input and output pointers to the kernel driver info structure.
351// Inputs:
352//   mtls - The MTLaunchStruct holding information about the kernel launch
353//   redp - The reduce parameters (driver info structure)
354//   x, y, z - The start offsets into each dimension
355static inline void RedpPtrSetup(const MTLaunchStructReduceNew *mtls, RsExpandKernelDriverInfo *redp,
356                                uint32_t x, uint32_t y, uint32_t z) {
357    for (uint32_t i = 0; i < redp->inLen; i++) {
358        redp->inPtr[i] = (const uint8_t *)mtls->ains[i]->getPointerUnchecked(x, y, z);
359    }
360}
361
// Decompose |val| along one dimension spanning [start, end).
// The coordinate for this dimension is written through |p|; the quotient
// left over for the remaining (more-significant) dimensions is returned.
static uint32_t sliceInt(uint32_t *p, uint32_t val, uint32_t start, uint32_t end) {
    // Degenerate (empty) range: pin the coordinate at |start| and pass
    // |val| through untouched.
    if (end <= start) {
        *p = start;
        return val;
    }

    const uint32_t span = end - start;
    *p = start + (val % span);
    return val / span;
}
374
375static bool SelectOuterSlice(const MTLaunchStructCommon *mtls, RsExpandKernelDriverInfo* info, uint32_t sliceNum) {
376
377    uint32_t r = sliceNum;
378    r = sliceInt(&info->current.z, r, mtls->start.z, mtls->end.z);
379    r = sliceInt(&info->current.lod, r, mtls->start.lod, mtls->end.lod);
380    r = sliceInt(&info->current.face, r, mtls->start.face, mtls->end.face);
381    r = sliceInt(&info->current.array[0], r, mtls->start.array[0], mtls->end.array[0]);
382    r = sliceInt(&info->current.array[1], r, mtls->start.array[1], mtls->end.array[1]);
383    r = sliceInt(&info->current.array[2], r, mtls->start.array[2], mtls->end.array[2]);
384    r = sliceInt(&info->current.array[3], r, mtls->start.array[3], mtls->end.array[3]);
385    return r == 0;
386}
387
388
// Worker body for launches that iterate dimensions beyond x and y.  Each
// pass claims one "outer slice" (a fixed z/lod/face/array coordinate) and
// runs the kernel across every row inside it.
static void walk_general(void *usr, uint32_t idx) {
    MTLaunchStructForEach *mtls = (MTLaunchStructForEach *)usr;
    // Private copy of the driver info so per-thread state doesn't collide.
    RsExpandKernelDriverInfo fep = mtls->fep;
    fep.lid = idx;
    ForEachFunc_t fn = mtls->kernel;


    while(1) {
        // Atomically claim the next outer slice across all workers.
        uint32_t slice = (uint32_t)__sync_fetch_and_add(&mtls->mSliceNum, 1);

        // SelectOuterSlice() decodes the slice index into coordinates; it
        // returns false once the iteration space is exhausted.
        if (!SelectOuterSlice(mtls, &fep, slice)) {
            return;
        }

        for (fep.current.y = mtls->start.y; fep.current.y < mtls->end.y;
             fep.current.y++) {

            FepPtrSetup(mtls, &fep, mtls->start.x,
                        fep.current.y, fep.current.z, fep.current.lod,
                        (RsAllocationCubemapFace)fep.current.face,
                        fep.current.array[0], fep.current.array[1],
                        fep.current.array[2], fep.current.array[3]);

            // Hand the kernel one full row [start.x, end.x).
            fn(&fep, mtls->start.x, mtls->end.x, mtls->fep.outStride[0]);
        }
    }

}
417
418static void walk_2d(void *usr, uint32_t idx) {
419    MTLaunchStructForEach *mtls = (MTLaunchStructForEach *)usr;
420    RsExpandKernelDriverInfo fep = mtls->fep;
421    fep.lid = idx;
422    ForEachFunc_t fn = mtls->kernel;
423
424    while (1) {
425        uint32_t slice  = (uint32_t)__sync_fetch_and_add(&mtls->mSliceNum, 1);
426        uint32_t yStart = mtls->start.y + slice * mtls->mSliceSize;
427        uint32_t yEnd   = yStart + mtls->mSliceSize;
428
429        yEnd = rsMin(yEnd, mtls->end.y);
430
431        if (yEnd <= yStart) {
432            return;
433        }
434
435        for (fep.current.y = yStart; fep.current.y < yEnd; fep.current.y++) {
436            FepPtrSetup(mtls, &fep, mtls->start.x, fep.current.y);
437
438            fn(&fep, mtls->start.x, mtls->end.x, fep.outStride[0]);
439        }
440    }
441}
442
443static void walk_1d_foreach(void *usr, uint32_t idx) {
444    MTLaunchStructForEach *mtls = (MTLaunchStructForEach *)usr;
445    RsExpandKernelDriverInfo fep = mtls->fep;
446    fep.lid = idx;
447    ForEachFunc_t fn = mtls->kernel;
448
449    while (1) {
450        uint32_t slice  = (uint32_t)__sync_fetch_and_add(&mtls->mSliceNum, 1);
451        uint32_t xStart = mtls->start.x + slice * mtls->mSliceSize;
452        uint32_t xEnd   = xStart + mtls->mSliceSize;
453
454        xEnd = rsMin(xEnd, mtls->end.x);
455
456        if (xEnd <= xStart) {
457            return;
458        }
459
460        FepPtrSetup(mtls, &fep, xStart, 0);
461
462        fn(&fep, xStart, xEnd, fep.outStride[0]);
463    }
464}
465
466// The function format_bytes() is an auxiliary function to assist in logging.
467//
468// Bytes are read from an input (inBuf) and written (as pairs of hex digits)
469// to an output (outBuf).
470//
471// Output format:
472// - starts with ": "
473// - each input byte is translated to a pair of hex digits
474// - bytes are separated by "." except that every fourth separator is "|"
475// - if the input is sufficiently long, the output is truncated and terminated with "..."
476//
477// Arguments:
478// - outBuf  -- Pointer to buffer of type "FormatBuf" into which output is written
479// - inBuf   -- Pointer to bytes which are to be formatted into outBuf
480// - inBytes -- Number of bytes in inBuf
481//
482// Constant:
483// - kFormatInBytesMax -- Only min(kFormatInBytesMax, inBytes) bytes will be read
484//                        from inBuf
485//
486// Return value:
487// - pointer (const char *) to output (which is part of outBuf)
488//
// Maximum number of input bytes format_bytes() will render.
static const int kFormatInBytesMax = 16;
// ": " + 2 digits per byte + 1 separator between bytes + "..." + null
typedef char FormatBuf[2 + kFormatInBytesMax*2 + (kFormatInBytesMax - 1) + 3 + 1];
// Render up to kFormatInBytesMax bytes of inBuf into *outBuf as pairs of
// hex digits separated by '.' (every fourth separator is '|'), prefixed by
// ": " and suffixed with "..." when the input was truncated.
// Returns a pointer to the formatted text (inside *outBuf).
static const char *format_bytes(FormatBuf *outBuf, const uint8_t *inBuf, const int inBytes) {
  char *out = *outBuf;
  // Leading ": " marker.
  out[0] = ':';
  out[1] = ' ';
  int pos = 2;
  const int count = std::min(kFormatInBytesMax, inBytes);
  for (int i = 0; i < count; ++i) {
    // A separator precedes every byte but the first.
    if (i != 0) {
      out[pos++] = (i % 4 == 0) ? '|' : '.';
    }
    sprintf(out + pos, "%02x", inBuf[i]);
    pos += 2;
  }
  // Terminate (sprintf already did when count > 0; this covers count == 0).
  out[pos] = '\0';
  // Flag truncated input.
  if (inBytes > kFormatInBytesMax) {
    strcpy(out + pos, "...");
  }
  return out;
}
508
// Worker body for the parallel general-reduce path: each worker lazily
// claims a private accumulator, then folds claimed x-ranges into it.
static void walk_1d_reduce_new(void *usr, uint32_t idx) {
  const MTLaunchStructReduceNew *mtls = (const MTLaunchStructReduceNew *)usr;
  RsExpandKernelDriverInfo redp = mtls->redp;

  // find accumulator: first time this worker runs, claim a slot.
  // (accumPtr is a reference into mtls->accumPtr[idx], so the claim sticks
  // across calls for the same worker.)
  uint8_t *&accumPtr = mtls->accumPtr[idx];
  if (!accumPtr) {
    uint32_t accumIdx = (uint32_t)__sync_fetch_and_add(&mtls->accumCount, 1);
    if (mtls->outFunc) {
      // With an outconverter, every worker accumulates into temporary memory.
      accumPtr = mtls->accumAlloc + mtls->accumStride * accumIdx;
    } else {
      // Without one, the first claimant accumulates directly into the output
      // allocation; later claimants get temporary slots.
      if (accumIdx == 0) {
        accumPtr = mtls->redp.outPtr[0];
      } else {
        accumPtr = mtls->accumAlloc + mtls->accumStride * (accumIdx - 1);
      }
    }
    REDUCE_NEW_ALOGV("walk_1d_reduce_new(%p): idx = %u got accumCount %u and accumPtr %p",
                     mtls->accumFunc, idx, accumIdx, accumPtr);
    // initialize accumulator (user-supplied initializer, else zero-fill)
    if (mtls->initFunc) {
      mtls->initFunc(accumPtr);
    } else {
      memset(accumPtr, 0, mtls->accumSize);
    }
  }

  // accumulate
  const ReduceNewAccumulatorFunc_t fn = mtls->accumFunc;
  while (1) {
    // Atomically claim the next run of cells along x.
    uint32_t slice  = (uint32_t)__sync_fetch_and_add(&mtls->mSliceNum, 1);
    uint32_t xStart = mtls->start.x + slice * mtls->mSliceSize;
    uint32_t xEnd   = xStart + mtls->mSliceSize;

    xEnd = rsMin(xEnd, mtls->end.x);

    // Range exhausted: this worker is done.
    if (xEnd <= xStart) {
      return;
    }

    RedpPtrSetup(mtls, &redp, xStart, 0, 0);
    fn(&redp, xStart, xEnd, accumPtr);

    // Optionally log the accumulator state after each claimed range.
    FormatBuf fmt;
    if (mtls->logReduceAccum) {
      format_bytes(&fmt, accumPtr, mtls->accumSize);
    } else {
      fmt[0] = 0;
    }
    REDUCE_NEW_ALOGV("walk_1d_reduce_new(%p): idx = %u [%u, %u)%s",
                     mtls->accumFunc, idx, xStart, xEnd, fmt);
  }
}
562
563// Launch a simple reduce-style kernel.
564// Inputs:
565//  ain:  The allocation that contains the input
566//  aout: The allocation that will hold the output
567//  mtls: Holds launch parameters
568void RsdCpuReferenceImpl::launchReduce(const Allocation *ain,
569                                       Allocation *aout,
570                                       MTLaunchStructReduce *mtls) {
571    const uint32_t xStart = mtls->start.x;
572    const uint32_t xEnd = mtls->end.x;
573
574    if (xStart >= xEnd) {
575      return;
576    }
577
578    const uint32_t startOffset = ain->getType()->getElementSizeBytes() * xStart;
579    mtls->kernel(&mtls->inBuf[startOffset], mtls->outBuf, xEnd - xStart);
580}
581
582// Launch a general reduce-style kernel.
583// Inputs:
584//   ains[0..inLen-1]: Array of allocations that contain the inputs
585//   aout:             The allocation that will hold the output
586//   mtls:             Holds launch parameters
587void RsdCpuReferenceImpl::launchReduceNew(const Allocation ** ains,
588                                          uint32_t inLen,
589                                          Allocation * aout,
590                                          MTLaunchStructReduceNew *mtls) {
591  mtls->logReduceAccum = mRSC->props.mLogReduceAccum;
592  if ((mWorkers.mCount >= 1) && mtls->isThreadable && !mInKernel) {
593    launchReduceNewParallel(ains, inLen, aout, mtls);
594  } else {
595    launchReduceNewSerial(ains, inLen, aout, mtls);
596  }
597}
598
599// Launch a general reduce-style kernel, single-threaded.
600// Inputs:
601//   ains[0..inLen-1]: Array of allocations that contain the inputs
602//   aout:             The allocation that will hold the output
603//   mtls:             Holds launch parameters
void RsdCpuReferenceImpl::launchReduceNewSerial(const Allocation ** ains,
                                                uint32_t inLen,
                                                Allocation * aout,
                                                MTLaunchStructReduceNew *mtls) {
  ALOGV("launchReduceNewSerial(%p)", mtls->accumFunc);

  // In the presence of outconverter, we allocate temporary memory for
  // the accumulator.
  //
  // In the absence of outconverter, we use the output allocation as the
  // accumulator.
  uint8_t *const accumPtr = (mtls->outFunc
                             ? static_cast<uint8_t *>(malloc(mtls->accumSize))
                             : mtls->redp.outPtr[0]);

  // initialize: user-supplied initializer, else zero-fill
  if (mtls->initFunc) {
    mtls->initFunc(accumPtr);
  } else {
    memset(accumPtr, 0, mtls->accumSize);
  }

  // accumulate: walk the full iteration space on this thread, feeding one
  // x-row at a time to the accumulator function.
  const ReduceNewAccumulatorFunc_t fn = mtls->accumFunc;
  uint32_t slice = 0;
  while (SelectOuterSlice(mtls, &mtls->redp, slice++)) {
    for (mtls->redp.current.y = mtls->start.y;
         mtls->redp.current.y < mtls->end.y;
         mtls->redp.current.y++) {
      RedpPtrSetup(mtls, &mtls->redp, mtls->start.x, mtls->redp.current.y, mtls->redp.current.z);
      fn(&mtls->redp, mtls->start.x, mtls->end.x, accumPtr);
    }
  }

  // outconvert: translate the accumulator into the output allocation and
  // free the temporary (only malloc'd when an outconverter is present).
  if (mtls->outFunc) {
    mtls->outFunc(mtls->redp.outPtr[0], accumPtr);
    free(accumPtr);
  }
}
644
645// Launch a general reduce-style kernel, multi-threaded.
646// Inputs:
647//   ains[0..inLen-1]: Array of allocations that contain the inputs
648//   aout:             The allocation that will hold the output
649//   mtls:             Holds launch parameters
void RsdCpuReferenceImpl::launchReduceNewParallel(const Allocation ** ains,
                                                  uint32_t inLen,
                                                  Allocation * aout,
                                                  MTLaunchStructReduceNew *mtls) {
  // For now, we don't know how to go parallel beyond 1D, or in the absence of a combiner.
  if ((mtls->redp.dim.y > 1) || (mtls->redp.dim.z > 1) || !mtls->combFunc) {
    launchReduceNewSerial(ains, inLen, aout, mtls);
    return;
  }

  // Number of threads = "main thread" + number of other (worker) threads
  const uint32_t numThreads = mWorkers.mCount + 1;

  // In the absence of outconverter, we use the output allocation as
  // an accumulator, and therefore need to allocate one fewer accumulator.
  const uint32_t numAllocAccum = numThreads - (mtls->outFunc == nullptr);

  // If mDebugReduceSplitAccum, then we want each accumulator to start
  // on a page boundary.  (TODO: Would some unit smaller than a page
  // be sufficient to avoid false sharing?)
  if (mRSC->props.mDebugReduceSplitAccum) {
    // Round up accumulator size to an integral number of pages
    mtls->accumStride =
        (unsigned(mtls->accumSize) + unsigned(mPageSize)-1) &
        ~(unsigned(mPageSize)-1);
    // Each accumulator gets its own page.  Alternatively, if we just
    // wanted to make sure no two accumulators are on the same page,
    // we could instead do
    //   allocSize = mtls->accumStride * (numAllocation - 1) + mtls->accumSize
    const size_t allocSize = mtls->accumStride * numAllocAccum;
    mtls->accumAlloc = static_cast<uint8_t *>(memalign(mPageSize, allocSize));
  } else {
    mtls->accumStride = mtls->accumSize;
    mtls->accumAlloc = static_cast<uint8_t *>(malloc(mtls->accumStride * numAllocAccum));
  }

  // Per-thread accumulator pointers; workers claim slots lazily inside
  // walk_1d_reduce_new, so all slots start out null.
  const size_t accumPtrArrayBytes = sizeof(uint8_t *) * numThreads;
  mtls->accumPtr = static_cast<uint8_t **>(malloc(accumPtrArrayBytes));
  memset(mtls->accumPtr, 0, accumPtrArrayBytes);

  mtls->accumCount = 0;

  // No nested kernels: mark this thread busy for the duration of the launch.
  rsAssert(!mInKernel);
  mInKernel = true;
  // Roughly 4 slices per thread rate-limits the atomic slice counter.
  mtls->mSliceSize = rsMax(1U, mtls->redp.dim.x / (numThreads * 4));
  ALOGV("launchReduceNewParallel(%p): %u threads, accumAlloc = %p",
        mtls->accumFunc, numThreads, mtls->accumAlloc);
  launchThreads(walk_1d_reduce_new, mtls);
  mInKernel = false;

  // Combine accumulators and identify final accumulator
  uint8_t *finalAccumPtr = (mtls->outFunc ? nullptr : mtls->redp.outPtr[0]);
  //   Loop over accumulators, combining into finalAccumPtr.  If finalAccumPtr
  //   is null, then the first accumulator I find becomes finalAccumPtr.
  for (unsigned idx = 0; idx < mtls->accumCount; ++idx) {
    uint8_t *const thisAccumPtr = mtls->accumPtr[idx];
    if (finalAccumPtr) {
      if (finalAccumPtr != thisAccumPtr) {
        if (mtls->combFunc) {
          if (mtls->logReduceAccum) {
            FormatBuf fmt;
            REDUCE_NEW_ALOGV("launchReduceNewParallel(%p): accumulating into%s",
                             mtls->accumFunc,
                             format_bytes(&fmt, finalAccumPtr, mtls->accumSize));
            REDUCE_NEW_ALOGV("launchReduceNewParallel(%p):    accumulator[%d]%s",
                             mtls->accumFunc, idx,
                             format_bytes(&fmt, thisAccumPtr, mtls->accumSize));
          }
          mtls->combFunc(finalAccumPtr, thisAccumPtr);
        } else {
          // The entry guard above routed combiner-less launches to the
          // serial path, so this should be unreachable.
          rsAssert(!"expected combiner");
        }
      }
    } else {
      finalAccumPtr = thisAccumPtr;
    }
  }
  rsAssert(finalAccumPtr != nullptr);
  if (mtls->logReduceAccum) {
    FormatBuf fmt;
    REDUCE_NEW_ALOGV("launchReduceNewParallel(%p): final accumulator%s",
                     mtls->accumFunc, format_bytes(&fmt, finalAccumPtr, mtls->accumSize));
  }

  // Outconvert
  if (mtls->outFunc) {
    mtls->outFunc(mtls->redp.outPtr[0], finalAccumPtr);
    if (mtls->logReduceAccum) {
      FormatBuf fmt;
      REDUCE_NEW_ALOGV("launchReduceNewParallel(%p): final outconverted result%s",
                       mtls->accumFunc,
                       format_bytes(&fmt, mtls->redp.outPtr[0], mtls->redp.outStride[0]));
    }
  }

  // Clean up
  free(mtls->accumPtr);
  free(mtls->accumAlloc);
}
749
750
751void RsdCpuReferenceImpl::launchForEach(const Allocation ** ains,
752                                        uint32_t inLen,
753                                        Allocation* aout,
754                                        const RsScriptCall* sc,
755                                        MTLaunchStructForEach* mtls) {
756
757    //android::StopWatch kernel_time("kernel time");
758
759    bool outerDims = (mtls->start.z != mtls->end.z) ||
760                     (mtls->start.face != mtls->end.face) ||
761                     (mtls->start.lod != mtls->end.lod) ||
762                     (mtls->start.array[0] != mtls->end.array[0]) ||
763                     (mtls->start.array[1] != mtls->end.array[1]) ||
764                     (mtls->start.array[2] != mtls->end.array[2]) ||
765                     (mtls->start.array[3] != mtls->end.array[3]);
766
767    if ((mWorkers.mCount >= 1) && mtls->isThreadable && !mInKernel) {
768        const size_t targetByteChunk = 16 * 1024;
769        mInKernel = true;  // NOTE: The guard immediately above ensures this was !mInKernel
770
771        if (outerDims) {
772            // No fancy logic for chunk size
773            mtls->mSliceSize = 1;
774            launchThreads(walk_general, mtls);
775        } else if (mtls->fep.dim.y > 1) {
776            uint32_t s1 = mtls->fep.dim.y / ((mWorkers.mCount + 1) * 4);
777            uint32_t s2 = 0;
778
779            // This chooses our slice size to rate limit atomic ops to
780            // one per 16k bytes of reads/writes.
781            if ((mtls->aout[0] != nullptr) && mtls->aout[0]->mHal.drvState.lod[0].stride) {
782                s2 = targetByteChunk / mtls->aout[0]->mHal.drvState.lod[0].stride;
783            } else if (mtls->ains[0]) {
784                s2 = targetByteChunk / mtls->ains[0]->mHal.drvState.lod[0].stride;
785            } else {
786                // Launch option only case
787                // Use s1 based only on the dimensions
788                s2 = s1;
789            }
790            mtls->mSliceSize = rsMin(s1, s2);
791
792            if(mtls->mSliceSize < 1) {
793                mtls->mSliceSize = 1;
794            }
795
796            launchThreads(walk_2d, mtls);
797        } else {
798            uint32_t s1 = mtls->fep.dim.x / ((mWorkers.mCount + 1) * 4);
799            uint32_t s2 = 0;
800
801            // This chooses our slice size to rate limit atomic ops to
802            // one per 16k bytes of reads/writes.
803            if ((mtls->aout[0] != nullptr) && mtls->aout[0]->getType()->getElementSizeBytes()) {
804                s2 = targetByteChunk / mtls->aout[0]->getType()->getElementSizeBytes();
805            } else if (mtls->ains[0]) {
806                s2 = targetByteChunk / mtls->ains[0]->getType()->getElementSizeBytes();
807            } else {
808                // Launch option only case
809                // Use s1 based only on the dimensions
810                s2 = s1;
811            }
812            mtls->mSliceSize = rsMin(s1, s2);
813
814            if (mtls->mSliceSize < 1) {
815                mtls->mSliceSize = 1;
816            }
817
818            launchThreads(walk_1d_foreach, mtls);
819        }
820        mInKernel = false;
821
822    } else {
823        ForEachFunc_t fn = mtls->kernel;
824        uint32_t slice = 0;
825
826
827        while(SelectOuterSlice(mtls, &mtls->fep, slice++)) {
828            for (mtls->fep.current.y = mtls->start.y;
829                 mtls->fep.current.y < mtls->end.y;
830                 mtls->fep.current.y++) {
831
832                FepPtrSetup(mtls, &mtls->fep, mtls->start.x,
833                            mtls->fep.current.y, mtls->fep.current.z, mtls->fep.current.lod,
834                            (RsAllocationCubemapFace) mtls->fep.current.face,
835                            mtls->fep.current.array[0], mtls->fep.current.array[1],
836                            mtls->fep.current.array[2], mtls->fep.current.array[3]);
837
838                fn(&mtls->fep, mtls->start.x, mtls->end.x, mtls->fep.outStride[0]);
839            }
840        }
841    }
842}
843
844RsdCpuScriptImpl * RsdCpuReferenceImpl::setTLS(RsdCpuScriptImpl *sc) {
845    //ALOGE("setTls %p", sc);
846    ScriptTLSStruct * tls = (ScriptTLSStruct *)pthread_getspecific(gThreadTLSKey);
847    rsAssert(tls);
848    RsdCpuScriptImpl *old = tls->mImpl;
849    tls->mImpl = sc;
850    tls->mContext = mRSC;
851    if (sc) {
852        tls->mScript = sc->getScript();
853    } else {
854        tls->mScript = nullptr;
855    }
856    return old;
857}
858
859const RsdCpuReference::CpuSymbol * RsdCpuReferenceImpl::symLookup(const char *name) {
860    return mSymLookupFn(mRSC, name);
861}
862
863
864RsdCpuReference::CpuScript * RsdCpuReferenceImpl::createScript(const ScriptC *s,
865                                    char const *resName, char const *cacheDir,
866                                    uint8_t const *bitcode, size_t bitcodeSize,
867                                    uint32_t flags) {
868
869    RsdCpuScriptImpl *i = new RsdCpuScriptImpl(this, s);
870    if (!i->init(resName, cacheDir, bitcode, bitcodeSize, flags
871        , getBccPluginName()
872        )) {
873        delete i;
874        return nullptr;
875    }
876    return i;
877}
878
// Factory functions for the built-in intrinsics, one per RsScriptIntrinsicID.
// Each is defined in its own rsCpuIntrinsic*.cpp translation unit and returns
// a newly allocated implementation bound to the given script and element.
extern RsdCpuScriptImpl * rsdIntrinsic_3DLUT(RsdCpuReferenceImpl *ctx,
                                             const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Convolve3x3(RsdCpuReferenceImpl *ctx,
                                                   const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_ColorMatrix(RsdCpuReferenceImpl *ctx,
                                                   const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_LUT(RsdCpuReferenceImpl *ctx,
                                           const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Convolve5x5(RsdCpuReferenceImpl *ctx,
                                                   const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Blur(RsdCpuReferenceImpl *ctx,
                                            const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_YuvToRGB(RsdCpuReferenceImpl *ctx,
                                                const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Blend(RsdCpuReferenceImpl *ctx,
                                             const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Histogram(RsdCpuReferenceImpl *ctx,
                                                 const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Resize(RsdCpuReferenceImpl *ctx,
                                              const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_BLAS(RsdCpuReferenceImpl *ctx,
                                              const Script *s, const Element *e);
901
902RsdCpuReference::CpuScript * RsdCpuReferenceImpl::createIntrinsic(const Script *s,
903                                    RsScriptIntrinsicID iid, Element *e) {
904
905    RsdCpuScriptImpl *i = nullptr;
906    switch (iid) {
907    case RS_SCRIPT_INTRINSIC_ID_3DLUT:
908        i = rsdIntrinsic_3DLUT(this, s, e);
909        break;
910    case RS_SCRIPT_INTRINSIC_ID_CONVOLVE_3x3:
911        i = rsdIntrinsic_Convolve3x3(this, s, e);
912        break;
913    case RS_SCRIPT_INTRINSIC_ID_COLOR_MATRIX:
914        i = rsdIntrinsic_ColorMatrix(this, s, e);
915        break;
916    case RS_SCRIPT_INTRINSIC_ID_LUT:
917        i = rsdIntrinsic_LUT(this, s, e);
918        break;
919    case RS_SCRIPT_INTRINSIC_ID_CONVOLVE_5x5:
920        i = rsdIntrinsic_Convolve5x5(this, s, e);
921        break;
922    case RS_SCRIPT_INTRINSIC_ID_BLUR:
923        i = rsdIntrinsic_Blur(this, s, e);
924        break;
925    case RS_SCRIPT_INTRINSIC_ID_YUV_TO_RGB:
926        i = rsdIntrinsic_YuvToRGB(this, s, e);
927        break;
928    case RS_SCRIPT_INTRINSIC_ID_BLEND:
929        i = rsdIntrinsic_Blend(this, s, e);
930        break;
931    case RS_SCRIPT_INTRINSIC_ID_HISTOGRAM:
932        i = rsdIntrinsic_Histogram(this, s, e);
933        break;
934    case RS_SCRIPT_INTRINSIC_ID_RESIZE:
935        i = rsdIntrinsic_Resize(this, s, e);
936        break;
937    case RS_SCRIPT_INTRINSIC_ID_BLAS:
938        i = rsdIntrinsic_BLAS(this, s, e);
939        break;
940
941    default:
942        rsAssert(0);
943    }
944
945    return i;
946}
947
948void* RsdCpuReferenceImpl::createScriptGroup(const ScriptGroupBase *sg) {
949  switch (sg->getApiVersion()) {
950    case ScriptGroupBase::SG_V1: {
951      CpuScriptGroupImpl *sgi = new CpuScriptGroupImpl(this, sg);
952      if (!sgi->init()) {
953        delete sgi;
954        return nullptr;
955      }
956      return sgi;
957    }
958    case ScriptGroupBase::SG_V2: {
959      return new CpuScriptGroup2Impl(this, sg);
960    }
961  }
962  return nullptr;
963}
964