ExecutionBuilder.cpp revision 8fb14e90ceb360adfbac0f708d27161b7c5b7fc5
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "ExecutionBuilder"

#include "ExecutionBuilder.h"

#include "CompilationBuilder.h"
#include "CpuExecutor.h"
#include "HalInterfaces.h"
#include "Manager.h"
#include "ModelBuilder.h"

#include <mutex>
#include <thread>
#include <vector>

namespace android {
namespace nn {

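// Records an input or output argument whose data is supplied through a caller-owned
// pointer.  A nullptr buffer with zero length marks the argument as having no value.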
int ModelArgumentInfo::setFromPointer(const Operand& operand,
                                      const ANeuralNetworksOperandType* type, void* data,
                                      uint32_t length) {
    int n = updateDimensionInfo(operand, type);
    if (n != ANEURALNETWORKS_NO_ERROR) {
        return n;
    }
    if (data == nullptr) {
        if (length) {
            LOG(ERROR) << "Setting argument to have no value, but a non-zero length was passed.";
            return ANEURALNETWORKS_BAD_DATA;
        }
        state = ModelArgumentInfo::HAS_NO_VALUE;
    } else {
        state = ModelArgumentInfo::POINTER;
    }
    buffer = data;
    locationAndLength = {.poolIndex = 0, .offset = 0, .length = length};
    return ANEURALNETWORKS_NO_ERROR;
}

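// Records an input or output argument whose data lives in a Memory pool already known
// to the execution; poolIndex identifies the pool, and offset/length locate the data in it.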
int ModelArgumentInfo::setFromMemory(const Operand& operand, const ANeuralNetworksOperandType* type,
                                     uint32_t poolIndex, uint32_t offset, uint32_t length) {
    int n = updateDimensionInfo(operand, type);
    if (n != ANEURALNETWORKS_NO_ERROR) {
        return n;
    }
    state = ModelArgumentInfo::MEMORY;
    locationAndLength = {.poolIndex = poolIndex, .offset = offset, .length = length};
    buffer = nullptr;
    return ANEURALNETWORKS_NO_ERROR;
}

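// Records an argument backed by runtime-allocated temporary memory.  No type override is
// taken here; the operand's declared dimensions and size are used as-is.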
int ModelArgumentInfo::setFromTemporaryMemory(const Operand& operand,
                                              uint32_t poolIndex, uint32_t offset) {
    dimensions = operand.dimensions;
    state = ModelArgumentInfo::MEMORY;
    locationAndLength =
            {.poolIndex = poolIndex, .offset = offset, .length = sizeOfData(operand)};
    buffer = nullptr;
    return ANEURALNETWORKS_NO_ERROR;
}

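// Validates an optional type override against the model's operand and captures the
// overridden dimensions for this argument.  A null newType leaves the dimensions empty,
// meaning the operand's declared dimensions are used unchanged.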
int ModelArgumentInfo::updateDimensionInfo(const Operand& operand,
                                           const ANeuralNetworksOperandType* newType) {
    if (newType == nullptr) {
        dimensions = hidl_vec<uint32_t>();
    } else {
        uint32_t count = newType->dimensionCount;
        if (static_cast<OperandType>(newType->type) != operand.type ||
            count != operand.dimensions.size()) {
            LOG(ERROR) << "ANeuralNetworksExecution_setInput/Output incompatible types";
            return ANEURALNETWORKS_BAD_DATA;
        }
        // Size the vector before writing into it.
        dimensions.resize(count);
        for (uint32_t i = 0; i < count; i++) {
            dimensions[i] = newType->dimensions[i];
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}

ExecutionBuilder::ExecutionBuilder(const CompilationBuilder* compilation) :
        mModel(compilation->mModel),
        mPlan(&compilation->mPlan),
        mInputs(mModel->inputCount()),
        mOutputs(mModel->outputCount()),
        mMemories(mModel->getMemories()) {
    LOG(DEBUG) << "ExecutionBuilder::ExecutionBuilder";
}

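// Binds model input 'index' to a caller-supplied buffer, optionally overriding the
// operand type declared by the model.  The length must fit in a uint32_t.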
int ExecutionBuilder::setInput(uint32_t index, const ANeuralNetworksOperandType* type,
                               const void* buffer, size_t length) {
    uint32_t count = static_cast<uint32_t>(mInputs.size());
    if (index >= count) {
        LOG(ERROR) << "ANeuralNetworksExecution_setInput bad index " << index << " " << count;
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (type != nullptr) {
        int n = validateOperandType(*type, "ANeuralNetworksExecution_setInput", false);
        if (n != ANEURALNETWORKS_NO_ERROR) {
            return n;
        }
    }
    if (length > 0xFFFFFFFF) {
        LOG(ERROR) << "ANeuralNetworksExecution_setInput input exceeds max length " << length;
        return ANEURALNETWORKS_BAD_DATA;
    }
    uint32_t l = static_cast<uint32_t>(length);
    return mInputs[index].setFromPointer(mModel->getInputOperand(index), type,
                                         const_cast<void*>(buffer), l);
}

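// Binds model input 'index' to a region of a shared Memory object.  The memory is added
// to this execution's pool list and referenced by pool index rather than by pointer.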
int ExecutionBuilder::setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
                                         const Memory* memory, size_t offset, size_t length) {
    // Should be similar to StepExecutor::setInputOrOutputFromTemporaryMemory()

    uint32_t count = static_cast<uint32_t>(mInputs.size());
    if (index >= count) {
        LOG(ERROR) << "ANeuralNetworksExecution_setInputFromMemory bad index " << index << " "
                   << count;
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (!memory->validateSize(offset, length)) {
        return ANEURALNETWORKS_BAD_DATA;
    }
    // TODO validate the rest
    uint32_t poolIndex = mMemories.add(memory);
    return mInputs[index].setFromMemory(mModel->getInputOperand(index), type, poolIndex, offset,
                                        length);
}

int ExecutionBuilder::setOutput(uint32_t index, const ANeuralNetworksOperandType* type,
                                void* buffer, size_t length) {
    uint32_t count = static_cast<uint32_t>(mOutputs.size());
    if (index >= count) {
        LOG(ERROR) << "ANeuralNetworksExecution_setOutput bad index " << index << " " << count;
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (type != nullptr) {
        int n = validateOperandType(*type, "ANeuralNetworksExecution_setOutput", false);
        if (n != ANEURALNETWORKS_NO_ERROR) {
            return n;
        }
    }
    if (length > 0xFFFFFFFF) {
        LOG(ERROR) << "ANeuralNetworksExecution_setOutput output exceeds max length " << length;
        return ANEURALNETWORKS_BAD_DATA;
    }
    uint32_t l = static_cast<uint32_t>(length);
    return mOutputs[index].setFromPointer(mModel->getOutputOperand(index), type, buffer, l);
}

int ExecutionBuilder::setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
                                          const Memory* memory, size_t offset, size_t length) {
    // Should be similar to StepExecutor::setInputOrOutputFromTemporaryMemory()

    uint32_t count = static_cast<uint32_t>(mOutputs.size());
    if (index >= count) {
        LOG(ERROR) << "ANeuralNetworksExecution_setOutputFromMemory bad index " << index << " "
                   << count;
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (!memory->validateSize(offset, length)) {
        return ANEURALNETWORKS_BAD_DATA;
    }
    // TODO validate the rest
    uint32_t poolIndex = mMemories.add(memory);
    return mOutputs[index].setFromMemory(mModel->getOutputOperand(index), type, poolIndex, offset,
                                         length);
}

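// Executes a partitioned plan step by step on the calling thread: repeatedly asks the
// plan for the next StepExecutor, launches it, and waits for it to complete.  The
// overall status is reported through executionCallback.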
static void asyncStartComputePartitioned(const ExecutionPlan* plan,
                                         std::shared_ptr<ExecutionPlan::Controller> controller,
                                         const sp<IExecutionCallback>& executionCallback) {
    LOG(DEBUG) << "ExecutionBuilder::startCompute (from plan, iteratively)";
    while (true) {
        std::shared_ptr<StepExecutor> executor;
        LOG(DEBUG) << "looking for next StepExecutor";
        int n = plan->next(controller, &executor);
        if (n != ANEURALNETWORKS_NO_ERROR || executor == nullptr) {
            executionCallback->notify(
                n == ANEURALNETWORKS_NO_ERROR ? ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE);
            return;
        }

        sp<ExecutionCallback> stepCallback;
        n = executor->startCompute(&stepCallback);
        if (n != ANEURALNETWORKS_NO_ERROR) {
            executionCallback->notify(ErrorStatus::GENERAL_FAILURE);
            return;
        }
        stepCallback->wait();
        ErrorStatus status = stepCallback->getStatus();
        if (status != ErrorStatus::NONE) {
            executionCallback->notify(status);
            return;
        }
    }
}

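// Launches the execution asynchronously.  If partitioning is enabled and a plan
// controller can be created, the plan-based path runs on a new thread.  Otherwise,
// when allowed, we fall back to finding a single driver that can handle every
// operation, or to the CPU if none can.  On success, *synchronizationCallback lets
// the caller wait for completion.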
int ExecutionBuilder::startCompute(sp<ExecutionCallback>* synchronizationCallback) {
    *synchronizationCallback = nullptr;

    // TODO validate that we have full types for all inputs and outputs,
    // that the graph is not cyclic,

    for (auto& p : mInputs) {
        if (p.state == ModelArgumentInfo::UNSPECIFIED) {
            LOG(ERROR) << "ANeuralNetworksExecution_startCompute not all inputs specified";
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    for (auto& p : mOutputs) {
        if (p.state == ModelArgumentInfo::UNSPECIFIED) {
            LOG(ERROR) << "ANeuralNetworksExecution_startCompute not all outputs specified";
            return ANEURALNETWORKS_BAD_DATA;
        }
    }

    // TODO: Remove the non-plan-based path once we've fully integrated ExecutionPlan
    // with the compilation and execution phases of the NN API?  Or retain that path
    // as a fallback in the case of partitioning failure?
    //
    // TODO: Entire plan-based-path should run in an asynchronous thread --
    // take the asynchronous thread logic out of startComputeOnCpu() and use
    // it to wrap the plan-based-path.
    const uint32_t partitioning = DeviceManager::get()->getPartitioning();
    if (partitioning > 0) {
        std::shared_ptr<ExecutionPlan::Controller> controller = mPlan->makeController(this);
        if (controller == nullptr) {
            if (!DeviceManager::partitioningAllowsFallback(partitioning)) {
                return ANEURALNETWORKS_OP_FAILED;
            }
        } else {
            // TODO: use a thread pool

            // Prepare the callback for asynchronous execution.
            // sp<ExecutionCallback> object is returned when the
            // execution has been successfully launched, otherwise a
            // nullptr is returned.  The executionCallback is
            // abstracted in the NN API as an "event".
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            std::thread thread(asyncStartComputePartitioned, mPlan, controller, executionCallback);
            executionCallback->bind_thread(std::move(thread));
            *synchronizationCallback = executionCallback;
            return ANEURALNETWORKS_NO_ERROR;
        }
    }

    // Find a driver that can handle all the operations.
    Model hidlModel;
    mModel->setHidlModel(&hidlModel);
    const std::vector<std::shared_ptr<Device>>& devices = DeviceManager::get()->getDrivers();
    for (const auto& device : devices) {
        hidl_vec<bool> supports;
        LOG(DEBUG) << "Checking " << device->getName();
        device->getSupportedOperations(hidlModel, &supports);
        if (std::find(supports.begin(), supports.end(), false) == supports.end()) {
            LOG(DEBUG) << "ExecutionBuilder::startCompute (without plan) on " << device->getName();
            StepExecutor executor(this, mModel, device->getInterface(),
                                  nullptr /* no IPreparedModel, so compile */);
            executor.mapInputsAndOutputsTrivially();
            return executor.startCompute(synchronizationCallback);
        }
    }
    // If none can, run on the CPU.
    LOG(DEBUG) << "ExecutionBuilder::startCompute (without plan) on CPU";
    StepExecutor executor(this, mModel,
                          nullptr /* no IDevice, so CPU */,
                          nullptr /* no IPreparedModel */);
    executor.mapInputsAndOutputsTrivially();
    return executor.startCompute(synchronizationCallback);
}

// Figures out how to place each of the inputs or outputs in a buffer. This just does the
// layout; it does not copy data.  Aligns each argument as needed.
int StepExecutor::allocatePointerArgumentsToPool(std::vector<ModelArgumentInfo>* args,
                                                 Memory* memory) {
    uint32_t nextPoolIndex = mMemories.size();
    int64_t total = 0;
    for (auto& info : *args) {
        if (info.state == ModelArgumentInfo::POINTER) {
            DataLocation& loc = info.locationAndLength;
            // TODO Good enough alignment?
            total += alignBytesNeeded(static_cast<uint32_t>(total), loc.length);
            loc.poolIndex = nextPoolIndex;
            loc.offset = static_cast<uint32_t>(total);
            total += loc.length;
        }
    }
    if (total > 0xFFFFFFFF) {
        LOG(ERROR) << "ANeuralNetworksExecution_startCompute Size of all inputs or outputs exceeds "
                      "2^32.";
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (total > 0) {
        memory->create(total);  // TODO check error
        mMemories.add(memory);
    }
    return ANEURALNETWORKS_NO_ERROR;
}

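// Converts a vector of ModelArgumentInfo into the hidl RequestArgument array expected
// by the driver, carrying over the has-no-value flag, data location, and dimensions.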
static void setRequestArgumentArray(const std::vector<ModelArgumentInfo>& argumentInfos,
                                    hidl_vec<RequestArgument>* ioInfos) {
    size_t count = argumentInfos.size();
    ioInfos->resize(count);
    for (size_t i = 0; i < count; i++) {
        const auto& info = argumentInfos[i];
        (*ioInfos)[i] = { .hasNoValue = info.state == ModelArgumentInfo::HAS_NO_VALUE,
                          .location = info.locationAndLength,
                          .dimensions = info.dimensions,
                        };
    }
}

StepExecutor::StepExecutor(const ExecutionBuilder* executionBuilder,
                           const ModelBuilder* model,
                           sp<IDevice> driver, sp<IPreparedModel> preparedModel) :
    mExecutionBuilder(executionBuilder), mModel(model),
    mDriver(driver), mPreparedModel(preparedModel),
    mInputs(model->inputCount()), mOutputs(model->outputCount()) {}

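// Copies the inputs, outputs, and memory pools of the ExecutionBuilder directly into
// this executor, for the case where the whole model runs as a single step.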
void StepExecutor::mapInputsAndOutputsTrivially() {
    mInputs = mExecutionBuilder->mInputs;
    mOutputs = mExecutionBuilder->mOutputs;
    mMemories = mExecutionBuilder->mMemories;
}

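// Copies a single argument from the ExecutionBuilder to this executor, remapping any
// memory pool index from the builder's pool list to this executor's pool list.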
void StepExecutor::mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput,
                                    ModelArgumentInfo* executorInputOrOutput) {
    *executorInputOrOutput = builderInputOrOutput;
    switch (executorInputOrOutput->state) {
        default:
            nnAssert(!"unexpected ModelArgumentInfo::state");
        case ModelArgumentInfo::POINTER:
        case ModelArgumentInfo::UNSPECIFIED:
            break;
        case ModelArgumentInfo::MEMORY: {
            const uint32_t builderPoolIndex =
                    builderInputOrOutput.locationAndLength.poolIndex;
            const Memory* memory = mExecutionBuilder->mMemories[builderPoolIndex];
            const uint32_t executorPoolIndex = mMemories.add(memory);
            executorInputOrOutput->locationAndLength.poolIndex =
                    executorPoolIndex;
            break;
        }
    }
}

int StepExecutor::setInputOrOutputFromTemporaryMemory(const Operand& inputOrOutputOperand,
                                                      const Memory* memory, uint32_t offset,
                                                      ModelArgumentInfo* inputOrOutputInfo) {
    // Should be similar to
    //     ExecutionBuilder::setInputFromMemory()
    //     ExecutionBuilder::setOutputFromMemory()

    uint32_t poolIndex = mMemories.add(memory);
    return inputOrOutputInfo->setFromTemporaryMemory(inputOrOutputOperand, poolIndex, offset);
}

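// Dispatches this step either to the driver (when one was supplied) or to the CPU path.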
int StepExecutor::startCompute(sp<ExecutionCallback>* synchronizationCallback) {
    if (mDriver == nullptr) {
        return startComputeOnCpu(synchronizationCallback);
    } else {
        return startComputeOnDevice(synchronizationCallback);
    }
}

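// Runs this step on the driver: prepares the model if needed, lays out pointer-based
// inputs and outputs in shared memory, issues the execute() call, waits for it, and
// copies pointer-based outputs back out of shared memory.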
int StepExecutor::startComputeOnDevice(sp<ExecutionCallback>* synchronizationCallback) {
    nnAssert(mDriver != nullptr);

    *synchronizationCallback = nullptr;

    // TODO: Remove the mPreparedModel == nullptr case once we've fully integrated
    // ExecutionPlan with the compilation and execution phases of the NN API
    if (mPreparedModel == nullptr) {
        Model model;
        mModel->setHidlModel(&model);

        // TODO Dangerous!  In a truly asynchronous flow, the driver call could outlive
        // this local model.  Safe for now because we wait on the callback below.
        sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
        Return<ErrorStatus> prepareLaunchStatus =
                mDriver->prepareModel(model, preparedModelCallback);
        if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
            return ANEURALNETWORKS_OP_FAILED;
        }

        // Immediately synchronize with callback object for now
        // TODO: change to asynchronous later
        preparedModelCallback->wait();
        ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
        mPreparedModel = preparedModelCallback->getPreparedModel();
        if (prepareReturnStatus != ErrorStatus::NONE || mPreparedModel == nullptr) {
            return ANEURALNETWORKS_OP_FAILED;
        }
    }

    // We separate the input & output pools so that we reduce the copying done if we
    // do an eventual remoting (hidl_memory->update()).  We could also use it to set
    // protection on read-only memory, but that's not currently done.
    Memory inputPointerArguments;
    Memory outputPointerArguments;

    // Layout the input and output data
    int n = allocatePointerArgumentsToPool(&mInputs, &inputPointerArguments);
    if (n != ANEURALNETWORKS_NO_ERROR) {
        return n;
    }
    n = allocatePointerArgumentsToPool(&mOutputs, &outputPointerArguments);
    if (n != ANEURALNETWORKS_NO_ERROR) {
        return n;
    }

    // Copy the input data that was specified via a pointer.
    // inputPointerArguments.update();
    for (auto& info : mInputs) {
        if (info.state == ModelArgumentInfo::POINTER) {
            DataLocation& loc = info.locationAndLength;
            uint8_t* data = nullptr;
            int n = inputPointerArguments.getPointer(&data);
            if (n != ANEURALNETWORKS_NO_ERROR) {
                return n;
            }
            memcpy(data + loc.offset, info.buffer, loc.length);
        }
    }
    // TODO: Add inputPointerArguments.commit() and .update() at all the right places

    Request request;
    setRequestArgumentArray(mInputs, &request.inputs);
    setRequestArgumentArray(mOutputs, &request.outputs);
    uint32_t count = mMemories.size();
    request.pools.resize(count);
    for (uint32_t i = 0; i < count; i++) {
        request.pools[i] = mMemories[i]->getHidlMemory();
    }

    // Prepare the callback for asynchronous execution. sp<ExecutionCallback>
    // object is returned when the execution has been successfully launched,
    // otherwise a nullptr is returned. The executionCallback is abstracted in
    // the NN API as an "event".
    //
    // The sp is used for ref-counting purposes. Without it, the HIDL service
    // could attempt to communicate with a dead callback object.
    //
    // TODO: Explain the "dead callback" problem further, either here or
    // in the design document.
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();

    LOG(DEBUG) << "Before mPreparedModel->execute() " << toString(request);
    // Execute.
    // TODO: What happens to the Callback if the service dies abnormally
    // -- won't that keep the Callback live forever, because the service
    // never has the opportunity to bump the reference count down? Or
    // maybe the HIDL infrastructure handles this magically? At worst,
    // it seems like this is a small memory leak, if the Callback stays
    // alive forever.
    if (mPreparedModel->execute(request, executionCallback) != ErrorStatus::NONE) {
        LOG(DEBUG) << "**Execute failed**";
        return ANEURALNETWORKS_OP_FAILED;
    }

    // TODO: Remove this synchronization point when the block of code below is
    // removed.
    executionCallback->wait();
    Return<ErrorStatus> executionStatus = executionCallback->getStatus();
    if (!executionStatus.isOk() || executionStatus != ErrorStatus::NONE) {
        LOG(DEBUG) << "**Execute async failed**";
        return ANEURALNETWORKS_OP_FAILED;
    }

    // Copy the output data from shared memory to the output buffers.
    // TODO: Move this block of code somewhere else. It should not be in the
    // startCompute function.
    // TODO: outputMemory->update(); outputMemory->commit()
    for (auto& info : mOutputs) {
        if (info.state == ModelArgumentInfo::POINTER) {
            DataLocation& loc = info.locationAndLength;
            uint8_t* data = nullptr;
            int n = outputPointerArguments.getPointer(&data);
            if (n != ANEURALNETWORKS_NO_ERROR) {
                return n;
            }
            memcpy(info.buffer, data + loc.offset, loc.length);
        }
    }
    LOG(DEBUG) << "StepExecutor::startComputeOnDevice completed";

    *synchronizationCallback = executionCallback;
    return ANEURALNETWORKS_NO_ERROR;
}

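// Runs the model on the CpuExecutor and reports the result through executionCallback.
// Intended to run on its own thread.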
static void asyncStartComputeOnCpu(const Model& model, const Request& request,
                                   const std::vector<RunTimePoolInfo>& runTimePoolInfos,
                                   const sp<IExecutionCallback>& executionCallback) {
    CpuExecutor executor;
    int err = executor.run(model, request, runTimePoolInfos);
    ErrorStatus status = err == ANEURALNETWORKS_NO_ERROR ?
            ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE;
    executionCallback->notify(status);
}

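// Runs this step on the CPU: builds the hidl model and request, wraps every memory pool
// and every pointer-based argument in a RunTimePoolInfo, and launches
// asyncStartComputeOnCpu() on a new thread bound to the returned callback.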
int StepExecutor::startComputeOnCpu(sp<ExecutionCallback>* synchronizationCallback) {
    // TODO: use a thread pool

    Model model;
    mModel->setHidlModel(&model);

    // Prepare the callback for asynchronous execution. sp<ExecutionCallback>
    // object is returned when the execution has been successfully launched,
    // otherwise a nullptr is returned. The executionCallback is abstracted in
    // the NN API as an "event".
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    *synchronizationCallback = nullptr;

    std::vector<RunTimePoolInfo> runTimePoolInfos;
    uint32_t count = mMemories.size();
    runTimePoolInfos.resize(count);
    for (uint32_t i = 0; i < count; i++) {
        const Memory* mem = mMemories[i];
        runTimePoolInfos[i].set(mem->getHidlMemory());
    }
    // Create an additional pool for each input or output that was specified via a pointer.
    auto fixPointerArguments = [&runTimePoolInfos](std::vector<ModelArgumentInfo>& argumentInfos) {
        for (ModelArgumentInfo& argumentInfo : argumentInfos) {
            if (argumentInfo.state == ModelArgumentInfo::POINTER) {
                RunTimePoolInfo runTimeInfo = {
                            .buffer = static_cast<uint8_t*>(argumentInfo.buffer)};
                argumentInfo.locationAndLength.poolIndex =
                            static_cast<uint32_t>(runTimePoolInfos.size());
                argumentInfo.locationAndLength.offset = 0;
                runTimePoolInfos.push_back(runTimeInfo);
            }
        }
    };
    fixPointerArguments(mInputs);
    fixPointerArguments(mOutputs);

    Request request;
    setRequestArgumentArray(mInputs, &request.inputs);
    setRequestArgumentArray(mOutputs, &request.outputs);

    // TODO: should model be moved with a std::cref?
    std::thread thread(asyncStartComputeOnCpu, model, std::move(request),
                       std::move(runTimePoolInfos), executionCallback);
    executionCallback->bind_thread(std::move(thread));

    *synchronizationCallback = executionCallback;
    return ANEURALNETWORKS_NO_ERROR;
}

}  // namespace nn
}  // namespace android