Searched refs:activation (Results 1 - 25 of 26) sorted by path


/frameworks/ml/nn/common/
CpuExecutor.cpp
297 int32_t activation = getScalarData<int32_t>(mOperands[ins[2]]); local
309 activation,
319 activation,
330 int32_t activation = getScalarData<int32_t>(mOperands[ins[2]]); local
342 activation,
352 activation,
404 int32_t activation; local
414 activation = getScalarData<int32_t>(mOperands[ins[10]]);
420 activation = getScalarData<int32_t>(mOperands[ins[7]]);
455 depth_multiplier, activation,
493 int32_t activation; local
572 int32_t activation; local
652 int32_t activation; local
716 int32_t activation; local
938 int32_t activation = getScalarData<int32_t>(mOperands[ins[3]]); local
[all...]
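
All of the CpuExecutor matches follow one pattern: an operation's fused activation arrives as an extra INT32 scalar input operand and is read back with getScalarData<int32_t>() at an operation-specific index, ins[2] for the three-input ops at the top of the list, ins[7] or ins[10] for the block that also passes depth_multiplier (implicit vs. explicit padding). A minimal sketch of that pattern, with a simplified stand-in for the real operand record (the Operand struct and index layout here are assumptions, not the actual CpuExecutor declarations):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Operand {                 // stand-in for CpuExecutor's operand record
    const void* buffer;
    std::size_t length;
};

template <typename T>
T getScalarData(const Operand& operand) {
    T value;
    std::memcpy(&value, operand.buffer, sizeof(T));  // scalar operands hold a single POD value
    return value;
}

// e.g. for a binary op whose fused activation is its third input (ins[2])
int32_t fusedActivationCode(const std::vector<Operand>& operands,
                            const std::vector<uint32_t>& ins) {
    return getScalarData<int32_t>(operands[ins[2]]);
}
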
OperationsUtils.cpp
128 void CalculateActivationRangeUint8(int32_t activation,
142 if (activation == kActivationRelu) {
145 } else if (activation == kActivationRelu6) {
148 } else if (activation == kActivationRelu1) {
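
These branches show what CalculateActivationRangeUint8 is for: turning the fused-activation code into a [min, max] clamp expressed in quantized units, using the output's scale and zero point. A hedged sketch of that computation (the Shape fields and kActivation* values are assumptions; the real declarations live in OperationsUtils.h):

#include <algorithm>
#include <cmath>
#include <cstdint>

struct Shape {                       // assumed quantization parameters
    float scale;
    int32_t offset;                  // zero point
};

enum { kActivationNone = 0, kActivationRelu = 1, kActivationRelu1 = 2, kActivationRelu6 = 3 };

void CalculateActivationRangeUint8(int32_t activation, const Shape& outputShape,
                                   int32_t* act_min, int32_t* act_max) {
    const float scale = outputShape.scale;
    const int32_t zero_point = outputShape.offset;
    auto quantize = [&](float f) {
        return zero_point + static_cast<int32_t>(std::round(f / scale));
    };
    if (activation == kActivationRelu) {             // [0, +inf)
        *act_min = std::max<int32_t>(0, quantize(0.f));
        *act_max = 255;
    } else if (activation == kActivationRelu6) {     // [0, 6]
        *act_min = std::max<int32_t>(0, quantize(0.f));
        *act_max = std::min<int32_t>(255, quantize(6.f));
    } else if (activation == kActivationRelu1) {     // [-1, 1]
        *act_min = std::max<int32_t>(0, quantize(-1.f));
        *act_max = std::min<int32_t>(255, quantize(1.f));
    } else {                                         // kActivationNone: full uint8 range
        *act_min = 0;
        *act_max = 255;
    }
}
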
/frameworks/ml/nn/common/include/
Operations.h
39 int32_t activation,
43 int32_t activation,
48 int32_t activation,
52 int32_t activation,
69 int32_t depth_multiplier, int32_t activation,
77 int32_t depth_multiplier, int32_t activation,
86 int32_t activation,
94 int32_t activation,
101 int32_t filter_width, int32_t filter_height, int32_t activation,
107 int32_t filter_width, int32_t filter_height, int32_t activation,
[all...]
OperationsUtils.h
92 void CalculateActivationRangeUint8(int32_t activation,
211 switch (activation) { \
225 LOG(ERROR) << "Unsupported fused activation function type"; \
/frameworks/ml/nn/common/operations/
Activation.cpp
93 #define ANDROID_NN_RELUX_QUANT8(activation) \
98 CalculateActivationRangeUint8(activation, inputShape, \
Conv2D.cpp
69 int32_t activation,
74 #define ANDROID_NN_CONV(activation) \
75 optimized_ops::Conv<FusedActivationFunctionType::activation>( \
98 int32_t activation,
119 CalculateActivationRangeUint8(activation, outputShape,
127 #define ANDROID_NN_CONV(activation) \
128 optimized_ops::Conv<FusedActivationFunctionType::activation>( \
63 convFloat32(const float* inputData, const Shape& inputShape, const float* filterData, const Shape& filterShape, const float* biasData, const Shape& biasShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t activation, float* outputData, const Shape& outputShape) argument
92 convQuant8(const uint8_t* inputData, const Shape& inputShape, const uint8_t* filterData, const Shape& filterShape, const int32_t* biasData, const Shape& biasShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t activation, uint8_t* outputData, const Shape& outputShape) argument
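
Conv2D.cpp, like the other kernels below, never branches on the activation inside the inner loop: the ANDROID_NN_CONV macro plugs the runtime code into the FusedActivationFunctionType template parameter of optimized_ops::Conv, and the switch plus LOG(ERROR) pair seen in OperationsUtils.h (lines 211/225 above) is the dispatch that maps the int32 code onto that enum. A hedged reconstruction of the idiom without the macros (the enum values and kernel body are illustrative only):

#include <cstdint>

enum class FusedActivationFunctionType { kNone, kRelu, kRelu1, kRelu6 };

template <FusedActivationFunctionType Ac>
void ConvKernel(/* input, filter, bias, output ... */) {
    // real code: optimized_ops::Conv<Ac>(...) with the clamp baked in at compile time
}

inline bool dispatchConv(int32_t activation) {
    switch (activation) {
        case 0: ConvKernel<FusedActivationFunctionType::kNone>();  break;
        case 1: ConvKernel<FusedActivationFunctionType::kRelu>();  break;
        case 2: ConvKernel<FusedActivationFunctionType::kRelu1>(); break;
        case 3: ConvKernel<FusedActivationFunctionType::kRelu6>(); break;
        default:
            // real code: LOG(ERROR) << "Unsupported fused activation function type";
            return false;
    }
    return true;
}
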
DepthwiseConv2D.cpp
43 int32_t depth_multiplier, int32_t activation,
48 #define ANDROID_NN_DEPTHWISE_CONV(activation) \
49 optimized_ops::DepthwiseConv<FusedActivationFunctionType::activation>( \
70 int32_t depth_multiplier, int32_t activation,
88 CalculateActivationRangeUint8(activation, outputShape,
95 #define ANDROID_NN_DEPTHWISE_CONV(activation) \
96 optimized_ops::DepthwiseConv<FusedActivationFunctionType::activation>( \
37 depthwiseConvFloat32(const float* inputData, const Shape& inputShape, const float* filterData, const Shape& filterShape, const float* biasData, const Shape& biasShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t depth_multiplier, int32_t activation, float* outputData, const Shape& outputShape) argument
64 depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape, const uint8_t* filterData, const Shape& filterShape, const int32_t* biasData, const Shape& biasShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t depth_multiplier, int32_t activation, uint8_t* outputData, const Shape& outputShape) argument
FullyConnected.cpp
28 int32_t activation,
31 #define ANDROID_NN_FULLY_CONNECTED(activation) \
32 optimized_ops::FullyConnected<FusedActivationFunctionType::activation>( \
46 int32_t activation,
64 CalculateActivationRangeUint8(activation, outputShape,
72 #define ANDROID_NN_FULLY_CONNECTED(activation) \
73 optimized_ops::FullyConnected<FusedActivationFunctionType::activation>( \
25 fullyConnectedFloat32(const float* inputData, const Shape& inputShape, const float* weightsData, const Shape& weightsShape, const float* biasData, const Shape& biasShape, int32_t activation, float* outputData, const Shape& outputShape) argument
43 fullyConnectedQuant8(const uint8_t* inputData, const Shape& inputShape, const uint8_t* weightsData, const Shape& weightsShape, const int32_t* biasData, const Shape& biasShape, int32_t activation, uint8_t* outputData, const Shape& outputShape) argument
Pooling.cpp
38 int32_t filter_width, int32_t filter_height, int32_t activation,
43 #define ANDROID_NN_AVERAGE_POOL(activation) \
44 optimized_ops::AveragePool<FusedActivationFunctionType::activation>( \
60 int32_t filter_width, int32_t filter_height, int32_t activation,
68 CalculateActivationRangeUint8(activation, outputShape,
72 #define ANDROID_NN_AVERAGE_POOL(activation) \
73 optimized_ops::AveragePool<FusedActivationFunctionType::activation>( \
90 int32_t filter_width, int32_t filter_height, int32_t activation,
95 #define ANDROID_NN_L2_POOL(activation) \
96 optimized_ops::L2Pool<FusedActivationFunctionType::activation>( \
34 averagePoolFloat32(const float* inputData, const Shape& inputShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t filter_width, int32_t filter_height, int32_t activation, float* outputData, const Shape& outputShape) argument
56 averagePoolQuant8(const uint8_t* inputData, const Shape& inputShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t filter_width, int32_t filter_height, int32_t activation, uint8_t* outputData, const Shape& outputShape) argument
86 l2PoolFloat32(const float* inputData, const Shape& inputShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t filter_width, int32_t filter_height, int32_t activation, float* outputData, const Shape& outputShape) argument
108 maxPoolFloat32(const float* inputData, const Shape& inputShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t filter_width, int32_t filter_height, int32_t activation, float* outputData, const Shape& outputShape) argument
130 maxPoolQuant8(const uint8_t* inputData, const Shape& inputShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t filter_width, int32_t filter_height, int32_t activation, uint8_t* outputData, const Shape& outputShape) argument
[all...]
SVDF.cpp
128 float activation = 0.0; local
132 activation += input_ptr_batch[j] * weights_feature_ptr[j];
143 output_ptr_batch[c] += weights_time_ptr[memory_size - 1] * activation;
148 // Apply activation.
152 // Right shift the state and concatenate with activation.
153 svdf_right_shift_state(state_in_ptr, memory_size - 1, activation,
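
Note that in SVDF.cpp "activation" is a local float, the projection of the current input onto one unit's feature weights, not a fused-activation code; the projection is weighted by the newest time weight, blended with the stored state, and then pushed into the state buffer. A hedged sketch of that per-unit loop (buffer layout, shift direction and helper names are simplified stand-ins for the real SVDF.cpp):

#include <cstring>

static void svdf_right_shift_state(float* state, int memory_size, float new_value) {
    // drop the oldest projection, shift the rest, append the newest one
    std::memmove(state, state + 1, (memory_size - 1) * sizeof(float));
    state[memory_size - 1] = new_value;
}

void svdfStepSketch(const float* input, int input_size,
                    const float* weights_feature,   // [num_units][input_size]
                    const float* weights_time,      // [num_units][memory_size]
                    float* state,                   // [num_units][memory_size]
                    float* output, int num_units, int memory_size) {
    for (int c = 0; c < num_units; ++c) {
        // 1) project the input onto this unit's feature weights
        float activation = 0.0f;
        for (int j = 0; j < input_size; ++j) {
            activation += input[j] * weights_feature[c * input_size + j];
        }
        // 2) combine stored projections and the new one with the time weights
        const float* time_c = weights_time + c * memory_size;
        float* state_c = state + c * memory_size;
        float sum = time_c[memory_size - 1] * activation;
        for (int m = 0; m < memory_size - 1; ++m) {
            sum += time_c[m] * state_c[m];
        }
        output[c] = sum;   // the real code then applies the fused activation here
        // 3) shift the state and store the newest projection
        svdf_right_shift_state(state_c, memory_size, activation);
    }
}
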
SVDFTest.cpp
221 int activation = ActivationFn::kActivationNone; local
222 ASSERT_EQ(execution.setInput(SVDF::kActivationParam, &activation,
223 sizeof(activation)),
SimpleMath.cpp
30 int32_t activation,
34 #define ANDROID_NN_NORMAL_ADD(activation) \
35 optimized_ops::Add<FusedActivationFunctionType::activation>( \
40 #define ANDROID_NN_BROADCAST_ADD(activation) \
41 optimized_ops::BroadcastAdd<FusedActivationFunctionType::activation>( \
59 int32_t activation,
94 CalculateActivationRangeUint8(activation, shapeOut,
98 #define ANDROID_NN_NORMAL_ADD(activation) \
99 optimized_ops::Add<FusedActivationFunctionType::activation>( \
109 #define ANDROID_NN_BROADCAST_ADD(activation) \
28 addFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2, int32_t activation, float* out, const Shape& shapeOut) argument
57 addQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2, int32_t activation, uint8_t* out, const Shape& shapeOut) argument
131 mulFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2, int32_t activation, float* out, const Shape& shapeOut) argument
160 mulQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2, int32_t activation, uint8_t* out, const Shape& shapeOut) argument
[all...]
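
SimpleMath.cpp picks between two kernels per call: ANDROID_NN_NORMAL_ADD when the two input shapes match and ANDROID_NN_BROADCAST_ADD otherwise, and the quant8 path first derives its clamp via CalculateActivationRangeUint8. A hedged sketch of that selection, with the fused clamp applied per element (Shape, the kernel bodies and the activation codes below are simplified assumptions):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Shape { std::vector<uint32_t> dimensions; };

inline bool SameShape(const Shape& a, const Shape& b) {
    return a.dimensions == b.dimensions;
}

inline uint32_t NumElements(const Shape& s) {
    uint32_t n = 1;
    for (uint32_t d : s.dimensions) n *= d;
    return n;
}

// Clamp stand-in for the fused activation (0 = NONE, 1 = RELU, 2 = RELU1, 3 = RELU6).
inline float applyFusedActivation(float x, int32_t activation) {
    switch (activation) {
        case 1:  return std::max(0.f, x);
        case 2:  return std::min(1.f, std::max(-1.f, x));
        case 3:  return std::min(6.f, std::max(0.f, x));
        default: return x;
    }
}

bool addFloat32Sketch(const float* in1, const Shape& shape1,
                      const float* in2, const Shape& shape2,
                      int32_t activation, float* out, const Shape& shapeOut) {
    if (SameShape(shape1, shape2)) {
        // same-shape path (ANDROID_NN_NORMAL_ADD / optimized_ops::Add in the real code)
        for (uint32_t i = 0; i < NumElements(shapeOut); ++i) {
            out[i] = applyFusedActivation(in1[i] + in2[i], activation);
        }
        return true;
    }
    // broadcasting path (ANDROID_NN_BROADCAST_ADD / optimized_ops::BroadcastAdd);
    // the index arithmetic is omitted in this sketch.
    return false;
}
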
/frameworks/ml/nn/common/operations/internal/optimized/
neon_tensor_utils.h
78 ActivationFn activation, float* result) {
79 PortableApplyActivationToVector(vector, v_size, activation, result);
77 ApplyActivationToVector(const float* vector, int v_size, ActivationFn activation, float* result) argument
tensor_utils_impl.h
101 // Apply activation function to elements of a vector.
103 ActivationFn activation,
/frameworks/ml/nn/common/operations/internal/reference/
portable_tensor_utils.cc
117 ActivationFn activation,
119 auto activation_func = ActivationFunctor(activation);
116 PortableApplyActivationToVector(const float* vector, int v_size, ActivationFn activation, float* result) argument
portable_tensor_utils.h
75 // Apply activation function to elements of a vector.
77 ActivationFn activation,
158 ActivationFn activation, float* result) {
159 PortableApplyActivationToVector(vector, v_size, activation, result);
157 ApplyActivationToVector(const float* vector, int v_size, ActivationFn activation, float* result) argument
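
Here the reference path is fully visible: PortableApplyActivationToVector wraps the ActivationFn code in an ActivationFunctor and applies it element-wise, and the NEON header simply forwards to the portable version. A hedged sketch of that pair (the functor body and enum values are assumptions drawn from the usual NNAPI activation set, not copies of ActivationFunctor.h):

#include <algorithm>
#include <cmath>

enum ActivationFn { kActivationNone = 0, kActivationRelu = 1, kActivationRelu1 = 2,
                    kActivationRelu6 = 3, kActivationTanh = 4, kActivationSigmoid = 6 };

class ActivationFunctor {
 public:
    explicit ActivationFunctor(ActivationFn act) : act_(act) {}
    float operator()(float a) const {
        switch (act_) {
            case kActivationNone:    return a;
            case kActivationRelu:    return a < 0.f ? 0.f : a;
            case kActivationRelu6:   return std::max(0.f, std::min(a, 6.f));
            case kActivationTanh:    return std::tanh(a);
            case kActivationSigmoid: return 1.0f / (1.0f + std::exp(-a));
            default:                 return a;   // unsupported codes pass through in this sketch
        }
    }
 private:
    ActivationFn act_;
};

void PortableApplyActivationToVector(const float* vector, int v_size,
                                     ActivationFn activation, float* result) {
    auto activation_func = ActivationFunctor(activation);
    for (int v = 0; v < v_size; v++) {
        result[v] = activation_func(vector[v]);
    }
}
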
/frameworks/ml/nn/common/operations/internal/
tensor_utils.h
88 // Apply activation function to elements of a vector.
90 ActivationFn activation, float* result);
/frameworks/ml/nn/runtime/test/
TestMemory.cpp
89 int32_t activation(0);
99 model.setOperandValue(f, &activation, sizeof(activation));
161 int32_t activation(0);
171 model.setOperandValue(f, &activation, sizeof(activation));
TestTrivialModel.cpp
63 int32_t activation(ANEURALNETWORKS_FUSED_NONE);
68 model->setOperandValue(d, &activation, sizeof(activation));
80 int32_t activation(ANEURALNETWORKS_FUSED_NONE);
88 model->setOperandValue(f, &activation, sizeof(activation));
159 // activation: NONE.
162 auto activation = modelBroadcastAdd2.addOperand(&scalarType); local
163 modelBroadcastAdd2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
171 modelBroadcastAdd2.addOperation(ANEURALNETWORKS_ADD, {a, b, activation}, {
194 auto activation = modelBroadcastMul2.addOperand(&scalarType); local
[all...]
/frameworks/ml/nn/runtime/test/generated/models/
avg_pool_float_2.model.cpp
11 auto activation = model->addOperand(&type1); local
21 model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
22 model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, activation}, {output});
avg_pool_float_3.model.cpp
11 auto activation = model->addOperand(&type1); local
21 model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
22 model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, activation}, {output});
avg_pool_quant8_2.model.cpp
11 auto activation = model->addOperand(&type1); local
21 model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
22 model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, activation}, {output});
avg_pool_quant8_3.model.cpp
11 auto activation = model->addOperand(&type1); local
21 model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
22 model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, activation}, {output});
max_pool_float_2.model.cpp
11 auto activation = model->addOperand(&type1); local
21 model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
22 model->addOperation(ANEURALNETWORKS_MAX_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, activation}, {output});
max_pool_quant8_2.model.cpp
11 auto activation = model->addOperand(&type1); local
21 model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
22 model->addOperation(ANEURALNETWORKS_MAX_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, activation}, {output});
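
Each of the generated model files above builds the same thing: a pooling operation whose padding, strides, filter size and fused activation are all constant INT32 operands wired as the operation's trailing inputs. A hedged sketch of the equivalent construction against the public NNAPI C API (the test files use an internal Model wrapper; the dimensions and operand indices below are made up for illustration):

#include <android/NeuralNetworks.h>
#include <cstdint>

bool buildAvgPoolModel(ANeuralNetworksModel* model) {
    uint32_t inDims[4] = {1, 2, 2, 1};    // made-up NHWC input shape
    uint32_t outDims[4] = {1, 1, 1, 1};   // 2x2 filter, stride 1, no padding

    ANeuralNetworksOperandType inputType;
    inputType.type = ANEURALNETWORKS_TENSOR_FLOAT32;
    inputType.dimensionCount = 4;
    inputType.dimensions = inDims;
    inputType.scale = 0.f;
    inputType.zeroPoint = 0;

    ANeuralNetworksOperandType outputType = inputType;
    outputType.dimensions = outDims;

    ANeuralNetworksOperandType scalarType;
    scalarType.type = ANEURALNETWORKS_INT32;
    scalarType.dimensionCount = 0;
    scalarType.dimensions = nullptr;
    scalarType.scale = 0.f;
    scalarType.zeroPoint = 0;

    // Operand indices are assigned in the order operands are added.
    ANeuralNetworksModel_addOperand(model, &inputType);         // 0: input
    for (int i = 0; i < 9; ++i) {
        ANeuralNetworksModel_addOperand(model, &scalarType);    // 1-9: pads, strides, filter, activation
    }
    ANeuralNetworksModel_addOperand(model, &outputType);        // 10: output

    int32_t zero = 0, one = 1, two = 2;
    int32_t activation = ANEURALNETWORKS_FUSED_NONE;            // or FUSED_RELU / RELU1 / RELU6
    for (int32_t idx = 1; idx <= 4; ++idx) {                    // explicit padding: left/right/top/bottom
        ANeuralNetworksModel_setOperandValue(model, idx, &zero, sizeof(zero));
    }
    ANeuralNetworksModel_setOperandValue(model, 5, &one, sizeof(one));   // stride_width
    ANeuralNetworksModel_setOperandValue(model, 6, &one, sizeof(one));   // stride_height
    ANeuralNetworksModel_setOperandValue(model, 7, &two, sizeof(two));   // filter_width
    ANeuralNetworksModel_setOperandValue(model, 8, &two, sizeof(two));   // filter_height
    ANeuralNetworksModel_setOperandValue(model, 9, &activation, sizeof(activation));

    uint32_t inputs[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    uint32_t outputs[1] = {10};
    if (ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_AVERAGE_POOL_2D,
                                          10, inputs, 1, outputs) != ANEURALNETWORKS_NO_ERROR) {
        return false;
    }
    uint32_t modelInputs[1] = {0};
    uint32_t modelOutputs[1] = {10};
    ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, modelInputs, 1, modelOutputs);
    return ANeuralNetworksModel_finish(model) == ANEURALNETWORKS_NO_ERROR;
}
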

Completed in 183 milliseconds
