Searched defs:input_max (Results 1 - 19 of 19) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
quantization_utils.cc
20 void GetOutputMinAndMaxForQuantizedAdd(float input_min, float input_max, argument
36 std::max(input_max, std::max(-input_min, std::max(smaller_input_max,
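The quantization_utils.cc hit suggests the output range for a quantized add is chosen symmetrically around zero, wide enough to cover the absolute bounds of both operands. A minimal sketch of that range arithmetic, assuming names that mirror the snippet; the extra headroom scaling the real kernel applies for its 32-bit accumulator is deliberately left out here:

#include <algorithm>

// Sketch: pick a symmetric output range that can hold either operand of a
// quantized add. The real TensorFlow kernel additionally widens this bound
// to leave headroom for the qint32 accumulator.
void GetOutputMinAndMaxForQuantizedAdd(float input_min, float input_max,
                                       float smaller_input_min,
                                       float smaller_input_max,
                                       float* output_min, float* output_max) {
  const float abs_bound =
      std::max(input_max,
               std::max(-input_min,
                        std::max(smaller_input_max, -smaller_input_min)));
  *output_max = abs_bound;   // symmetric range [-bound, +bound]
  *output_min = -abs_bound;
}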
quantized_activation_ops_test.cc
46 const float input_max = 127.0f; local
52 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
59 AddInputFromArray<float>(TensorShape({1}), {input_max});
77 const float input_max = 127.0f; local
83 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
90 AddInputFromArray<float>(TensorShape({1}), {input_max});
quantize_and_dequantize_op.h
44 auto input_max = input_max_tensor->scalar<T>(); local
47 input_max.device(d) = input.maximum();
50 d.memcpyDeviceToHost(&max_range, input_max.data(), sizeof(T));
quantized_bias_add_op.cc
42 const float input_max = context->input(3).flat<float>()(0); variable
71 GetOutputMinAndMaxForQuantizedAdd(input_min, input_max, bias_min,
75 bias_ui8_array.size(), input_min, input_max,
81 input_max, bias, bias_min, bias_max, output, &total_min, &total_max);
quantized_bias_add_op_test.cc
52 const float input_max = 60.0f; local
59 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
78 AddInputFromArray<float>(TensorShape({1}), {input_max});
102 const float input_max = 2006.27f; local
119 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
160 AddInputFromArray<float>(TensorShape({1}), {input_max});
quantized_pooling_ops_test.cc
52 const float input_max = 255.0f; local
62 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
73 AddInputFromArray<float>(TensorShape({1}), {input_max});
97 const float input_max = 255.0f; local
107 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
118 AddInputFromArray<float>(TensorShape({1}), {input_max});
quantized_batch_norm_op_test.cc
62 const float input_max = 127.0f; local
72 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
101 AddInputFromArray<float>(TensorShape({1}), {input_max});
159 const float input_max = 127.0f; local
169 FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
198 AddInputFromArray<float>(TensorShape({1}), {input_max});
quantized_batch_norm_op.cc
32 const float input_max, const Tensor& mean,
57 QuantizedToFloat(input_flat(input_index), input_min, input_max);
95 const float input_max, const Tensor& mean,
151 input_max, *output_min, *output_max);
177 const float input_max = context->input(2).flat<float>()(0); variable
212 FixedPointBatchNorm<T1, T2>(input, input_min, input_max, mean, mean_min,
31 ReferenceBatchNorm(const Tensor& input, const float input_min, const float input_max, const Tensor& mean, float mean_min, float mean_max, const Tensor& var, float var_min, float var_max, const Tensor& beta, float beta_min, float beta_max, const Tensor& gamma, float gamma_min, float gamma_max, float variance_epsilon, bool scale_after_normalization, Tensor* output, float* output_min, float* output_max) argument
94 FixedPointBatchNorm(const Tensor& input, const float input_min, const float input_max, const Tensor& mean, float mean_min, float mean_max, const Tensor& var, float var_min, float var_max, const Tensor& beta, float beta_min, float beta_max, const Tensor& gamma, float gamma_min, float gamma_max, float variance_epsilon, bool scale_after_normalization, Tensor* output, float* output_min, float* output_max) argument
quantized_concat_op.cc
42 const float input_max = (*input_min_and_max)[input_index].second; local
43 if (input_min == output_min && input_max == output_max) {
52 QuantizedToFloatStruct<T> q2f(input_min, input_max);
88 const float input_max = input_maxes[i].flat<float>()(0); local
89 input_mins_and_maxes->emplace_back(input_min, input_max);
91 overall_max = std::max(overall_max, input_max);
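The quantized_concat_op.cc matches show the concat kernel collecting each input's (min, max) pair and folding them into a single overall output range; inputs whose range already equals the output range pass through, the rest are requantized. A small sketch of just the range-merging step, with illustrative names rather than the kernel's exact interface:

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

// Sketch: merge per-input float ranges into one overall range for the
// concatenated output, as the hits above suggest the kernel does.
std::pair<float, float> MergeInputRanges(
    const std::vector<std::pair<float, float>>& input_mins_and_maxes) {
  float overall_min = std::numeric_limits<float>::max();
  float overall_max = std::numeric_limits<float>::lowest();
  for (const auto& min_and_max : input_mins_and_maxes) {
    overall_min = std::min(overall_min, min_and_max.first);
    overall_max = std::max(overall_max, min_and_max.second);
  }
  return {overall_min, overall_max};
}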
meta_support.cc
257 float input_min, float input_max, float output_min,
272 CalculateRangeScale<int32_t>(input_min, input_max);
348 float input_min, float input_max, float bias_min,
366 CalculateRangeScale<uint8_t>(input_min, input_max);
256 Requantize(OpKernelContext* tf_context, const qint32* input, int count, float input_min, float input_max, float output_min, float output_max, quint8* output) argument
346 QuantizedBiasAdd(OpKernelContext* tf_context, const quint8* input, int input_count, const quint8* bias, int bias_count, float input_min, float input_max, float bias_min, float bias_max, float output_min, float output_max, qint32* output) argument
quantized_instance_norm.cc
278 float input_max = context->input(2).flat<float>()(0); variable
279 float input_scale = (input_max - input_min) / 255.0f;
281 OP_REQUIRES(context, input_min < input_max,
283 "input_min must be less than input_max : ", input_min,
284 " >= ", input_max));
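The quantized_instance_norm.cc hit shows the op validating the range and then deriving a quint8 dequantization scale directly from it. A standalone sketch of that arithmetic, with a plain assert standing in for OP_REQUIRES:

#include <cassert>

// Sketch: a quint8 value q maps back to float as input_min + q * input_scale,
// so the scale is the range width divided by the 255 representable steps.
float ComputeQuint8InputScale(float input_min, float input_max) {
  assert(input_min < input_max);  // the kernel rejects degenerate ranges
  return (input_max - input_min) / 255.0f;
}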
quantization_utils_test.cc
35 float input_max, float output_min, float output_max,
43 QuantizedToFloat(values_quantized[value_index], input_min, input_max),
55 input_max, output_min, output_max,
59 *eigen_device, i_tensor, input_min, input_max, output_min, output_max,
71 << ", input_max=" << input_max << ", output_min=" << output_min
76 void TestRequantizeMany8To32Bit(float input_min, float input_max, argument
85 QuantizedToFloat(values_quantized[value_index], input_min, input_max),
96 input_max, output_min, output_max,
107 << ", input_max
34 TestRequantizeMany(Eigen::ThreadPoolDevice* eigen_device, float input_min, float input_max, float output_min, float output_max, const std::vector<qint32>& values_quantized, int tolerance = 1) argument
231 const float input_max = ranges[range_index][1]; local
283 const float input_max = 100.0f; local
526 const float input_max = ranges[range_index][1]; local
548 const float input_max = 0.641057f; local
583 const float input_max = ranges[range_index][1]; local
626 const float input_max = 255.0f; local
656 const float input_max = 2400.0f; local
679 const float input_max = 127.0f; local
[all...]
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
quantize_and_dequantize_op.cc
46 // m = max(abs(input_min), abs(input_max)) if range_given is true,
50 xla::ComputationDataHandle input_min, input_max; variable
56 input_max = XlaHelpers::FloatLiteral(b, data_type, input_max_value);
62 input_max =
65 xla::ComputationDataHandle m = b->Max(b->Abs(input_min), b->Abs(input_max));
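The tf2xla quantize_and_dequantize hits show the symmetric bound m taken as the larger absolute endpoint of the input range (either the user-given range or the tensor's own min/max). The same computation on plain floats, as a hedged sketch outside the XLA builder API:

#include <algorithm>
#include <cmath>

// Sketch: symmetric quantization bound m = max(|input_min|, |input_max|),
// mirroring the comment in quantize_and_dequantize_op.cc.
float SymmetricBound(float input_min, float input_max) {
  return std::max(std::fabs(input_min), std::fabs(input_max));
}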
fake_quantize_ops.cc
106 float input_min, input_max; local
108 OP_REQUIRES_OK(ctx, ctx->GetAttr("max", &input_max));
109 CpuNudge(input_min, input_max, quant_min_, quant_max_, &nudged_input_min_,
154 float input_min, input_max, scale; local
156 OP_REQUIRES_OK(ctx, ctx->GetAttr("max", &input_max));
157 CpuNudge(input_min, input_max, quant_min, quant_max, &nudged_input_min_,
210 xla::ComputationDataHandle input_max = ctx->Input(2); variable
214 XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_,
251 xla::ComputationDataHandle input_max = ctx->Input(3); variable
255 XlaNudge(b, data_type, input_min, input_max, quant_min
[all...]
/external/tensorflow/tensorflow/compiler/tf2xla/
xla_helpers.cc
52 xla::ComputationDataHandle input_max = builder->Reduce( local
59 builder->Eq(input, input_max, broadcast_dims), xla_output_type);
/external/tensorflow/tensorflow/compiler/xla/tests/
reduce_test.cc
598 auto input_max = FLT_MIN; local
600 [&](int64, int64, float* v) { input_max = std::max(input_max, *v); });
601 ComputeAndCompareR0<float>(&builder, input_max, {}, ErrorSpec(0.0001));
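The reduce_test.cc hit computes a reference maximum by scanning every element and compares it against the XLA reduce result. A hedged sketch of that reference loop; note that FLT_MIN is the smallest positive normal float, so std::numeric_limits<float>::lowest() is the safer seed if the data could be all-negative:

#include <algorithm>
#include <limits>
#include <vector>

// Sketch: reference max-reduction over a flat buffer, seeded with the most
// negative representable float.
float ReferenceMax(const std::vector<float>& values) {
  float input_max = std::numeric_limits<float>::lowest();
  for (float v : values) {
    input_max = std::max(input_max, v);
  }
  return input_max;
}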
/external/tensorflow/tensorflow/tools/graph_transforms/
quantize_nodes.cc
309 // If the user has passed in the input_min and input_max args, then we need to
316 float input_max; local
318 TF_RETURN_IF_ERROR(ExtractRangeFromParams(context, "input_min", "input_max",
319 &input_min, &input_max,
354 max_tensor.flat<float>()(0) = input_max;
/external/webrtc/webrtc/modules/audio_coding/neteq/
merge.cc
66 int16_t expanded_max, input_max; local
69 &expanded_max, &input_max);
92 expanded_max, input_max, old_length,
208 int16_t* expanded_max, int16_t* input_max) const {
213 *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
228 WebRtcSpl_NormW32(*input_max * *input_max);
310 size_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max, argument
318 if (expanded_max * input_max > 26843546) {
/external/tensorflow/tensorflow/core/graph/
quantize_training.cc
55 float input_max; member in struct:tensorflow::__anon26313::EdgeToConvert
64 input_max(max) {}
80 bool* range_given, float* input_min, float* input_max) {
96 *input_max = 6;
101 *input_max = 1;
106 *input_max = 1;
114 input_max);
124 input_max);
499 Node** input_max) {
501 // Make constant nodes for the input_min and input_max i
79 FindType(const Graph* graph, const Node* node, bool* signed_input, bool* range_given, float* input_min, float* input_max) argument
496 MakeInputMinMax(Graph* graph, const string& name_prefix, const EdgeToConvert& edge, std::vector<Node*>* added_variables, Node** input_min, Node** input_max) argument
535 Node* input_max; local
629 float input_max = 0; local
[all...]
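The quantize_training.cc hits show FindType hard-coding input_max for ops whose output range is known statically (6 in one case, 1 in two others). A hedged sketch of that kind of lookup; the op names Relu6, Sigmoid, and Tanh are my assumption about which cases these are, not something the listing confirms:

#include <string>

// Sketch: return a statically known output range for ops whose activation
// bounds are fixed. The op-name-to-range mapping here is assumed, not taken
// verbatim from quantize_training.cc.
bool KnownOutputRange(const std::string& op, bool* signed_input,
                      float* input_min, float* input_max) {
  if (op == "Relu6") {
    *signed_input = false; *input_min = 0.0f; *input_max = 6.0f;
  } else if (op == "Sigmoid") {
    *signed_input = false; *input_min = 0.0f; *input_max = 1.0f;
  } else if (op == "Tanh") {
    *signed_input = true; *input_min = -1.0f; *input_max = 1.0f;
  } else {
    return false;  // range not known statically; caller must infer it
  }
  return true;
}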

Completed in 1112 milliseconds