Searched defs:num_batches (Results 1 - 17 of 17) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
parameterized_truncated_normal_op_test.cc
27 static Graph* PTruncatedNormal(int num_batches, int samples_per_batch) { argument
30 shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
33 Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
35 Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
38 Tensor minvals_t(DT_FLOAT, TensorShape({num_batches}));
40 Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches}));
56 static Graph* PTruncatedNormal2SD(int num_batches, int samples_per_batch) { argument
59 shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
61 Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
63 Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
83 PTruncatedNormalOneTail(int num_batches, int samples_per_batch) argument
[all...]
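The benchmark above sizes its parameter tensors per batch: means, stdevs, minvals and maxvals each have shape {num_batches}, while the sampled output covers num_batches * samples_per_batch values. A minimal standalone sketch of that relationship, using naive rejection sampling as an assumption (this is not the TensorFlow kernel):

```cpp
#include <random>
#include <vector>

// Illustration only: one mean/stddev/min/max per batch,
// samples_per_batch draws per batch, rejected until they land in [minval, maxval].
std::vector<float> TruncatedNormalBatches(int num_batches, int samples_per_batch,
                                          const std::vector<float>& means,
                                          const std::vector<float>& stdevs,
                                          const std::vector<float>& minvals,
                                          const std::vector<float>& maxvals) {
  std::mt19937 gen(42);
  std::vector<float> out;
  out.reserve(static_cast<size_t>(num_batches) * samples_per_batch);
  for (int b = 0; b < num_batches; ++b) {  // one parameter set per batch
    std::normal_distribution<float> dist(means[b], stdevs[b]);
    for (int s = 0; s < samples_per_batch; ++s) {
      float v;
      do {
        v = dist(gen);
      } while (v < minvals[b] || v > maxvals[b]);  // naive rejection sampling
      out.push_back(v);
    }
  }
  return out;  // size == num_batches * samples_per_batch
}
```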
range_sampler_test.cc
265 // We sample num_batches batches, each without replacement.
273 const int num_batches = 100; local
290 for (int trial = 0; trial < num_batches; trial++) {
305 const float average_count = static_cast<float>(histogram[i]) / num_batches;
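The sampler test draws num_batches batches, builds a histogram of how often each id comes up, and divides by num_batches to get a per-batch average count. A hedged sketch of that bookkeeping, with a plain uniform draw standing in for the real sampler (the actual test samples each batch without replacement, and batch_size and range here are assumptions):

```cpp
#include <random>
#include <vector>

int main() {
  const int num_batches = 100;  // as in the test
  const int batch_size = 64;    // assumed
  const int range = 1000;       // assumed id range
  std::mt19937 gen(0);
  std::uniform_int_distribution<int> sampler(0, range - 1);  // stand-in sampler

  std::vector<int> histogram(range, 0);
  for (int trial = 0; trial < num_batches; ++trial) {
    for (int i = 0; i < batch_size; ++i) ++histogram[sampler(gen)];
  }
  for (int i = 0; i < range; ++i) {
    // Average count per batch, as computed in the test before comparing it
    // against the sampler's expected probability for id i.
    const float average_count = static_cast<float>(histogram[i]) / num_batches;
    (void)average_count;
  }
  return 0;
}
```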
eigen_pooling_test.cc
32 const int num_batches = 13; local
38 Tensor<float, 4> input(depth, input_rows, input_cols, num_batches);
39 Tensor<float, 4> result(depth, output_rows, output_cols, num_batches);
52 EXPECT_EQ(result.dimension(3), num_batches);
54 for (int b = 0; b < num_batches; ++b) {
80 const int num_batches = 13; local
86 Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows, depth);
87 Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
101 EXPECT_EQ(result.dimension(0), num_batches);
103 for (int b = 0; b < num_batches;
130 const int num_batches = 13; local
190 const int num_batches = 13; local
250 const int num_batches = 13; local
312 const int num_batches = 13; local
374 const int num_batches = 13; local
452 const int num_batches = 13; local
529 const int num_batches = 13; local
577 const int num_batches = 13; local
627 const int num_batches = 13; local
687 const int num_batches = 13; local
[all...]
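The pooling tests exercise both tensor layouts: column-major tensors put num_batches last (dimension 3), while the RowMajor variants reverse the ordering so num_batches is dimension 0. A small layout-only sketch, assuming Eigen's unsupported Tensor module is on the include path:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  const int depth = 10, rows = 5, cols = 5, num_batches = 13;

  // Column-major (Eigen's default): batch is the last dimension.
  Eigen::Tensor<float, 4> col_major(depth, rows, cols, num_batches);
  assert(col_major.dimension(3) == num_batches);

  // RowMajor variant: the tests reverse the dimension order, so batch comes first.
  Eigen::Tensor<float, 4, Eigen::RowMajor> row_major(num_batches, cols, rows, depth);
  assert(row_major.dimension(0) == num_batches);
  return 0;
}
```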
eigen_backward_spatial_convolutions_test.cc
500 const int num_batches = 13; local
511 num_batches);
514 num_batches);
526 EXPECT_EQ(input_backward.dimension(3), num_batches);
528 for (int b = 0; b < num_batches; ++b) {
555 const int num_batches = 13; local
565 Tensor<float, 4, RowMajor> input_backward(num_batches, input_cols, input_rows,
569 Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
579 EXPECT_EQ(input_backward.dimension(0), num_batches);
584 for (int b = 0; b < num_batches;
611 const int num_batches = 13; local
678 const int num_batches = 13; local
743 test_batched_strided_spatial_convolution_backward_input_valid( const int num_batches, const int input_depth, const int input_rows, const int input_cols, const int output_depth) argument
801 int num_batches = 1; local
819 test_batched_strided_spatial_convolution_backward_input_valid_row_major( const int num_batches, const int input_depth, const int input_rows, const int input_cols, const int output_depth) argument
878 int num_batches = 1; local
999 const int num_batches = 11; local
1046 const int num_batches = 11; local
1096 const int num_batches = 11; local
1144 const int num_batches = 11; local
1191 const int num_batches = 11; local
1366 const int num_batches = 13; local
1421 const int num_batches = 13; local
1477 const int num_batches = 13; local
1540 const int num_batches = 13; local
1607 const int num_batches = 13; local
1674 const int num_batches = 13; local
1734 const int num_batches = 13; local
1795 const int num_batches = 13; local
1874 const int num_batches = 13; local
1953 const int num_batches = 13; local
2030 const int num_batches = 13; local
[all...]
eigen_spatial_convolutions_test.cc
195 const int num_batches = 13; local
202 Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
204 Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
217 EXPECT_EQ(result.dimension(3), num_batches);
219 for (int b = 0; b < num_batches; ++b) {
247 const int num_batches = 13; local
257 Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
259 Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
272 EXPECT_EQ(result.dimension(3), num_batches);
275 for (int b = 0; b < num_batches;
305 const int num_batches = 13; local
361 const int num_batches = 13; local
409 const int num_batches = 5; local
457 const int num_batches = 13; local
508 const int num_batches = 13; local
559 const int num_batches = 13; local
613 const int num_batches = 13; local
[all...]
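As in the pooling tests, the convolution tests allocate the result with the same num_batches as the input; only the depth and spatial dimensions change. A shape-only sketch for VALID padding (the patch sizes are assumptions, and no convolution is actually evaluated):

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  const int input_depth = 10, input_rows = 5, input_cols = 5, num_batches = 13;
  const int patch_rows = 3, patch_cols = 3, output_depth = 7;  // assumed filter shape

  // VALID padding: output spatial extent = input extent - patch extent + 1.
  const int output_rows = input_rows - patch_rows + 1;
  const int output_cols = input_cols - patch_cols + 1;

  Eigen::Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
  Eigen::Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);

  // The batch dimension is carried through the convolution unchanged.
  assert(result.dimension(3) == num_batches);
  (void)input;
  return 0;
}
```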
parameterized_truncated_normal_op.cc
52 void operator()(OpKernelContext* ctx, const CPUDevice& d, int64 num_batches, argument
85 // with length num_batches, but the scalar becomes an array of length 1.
91 // The last batch can be short, if we adjusted num_batches and
232 Shard(worker_threads.num_threads, worker_threads.workers, num_batches,
264 int32 num_batches = shape_tensor.flat<int32>()(0); variable
271 const int32 num_elements = num_batches * samples_per_batch;
273 // Allocate the output before fudging num_batches and samples_per_batch.
305 int32 size = num_batches * samples_per_batch;
309 num_batches = adjusted_batches;
312 // Parameters must be broadcastable to the shape [num_batches]
[all...]
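The CPU op treats its workload as num_elements = num_batches * samples_per_batch and, before sharding it across worker threads, may re-chunk that flat size into different batch and sample counts, which is why the last batch can be short. A hedged sketch of that re-chunking arithmetic (kTargetSamplesPerBatch is an illustrative constant, not the kernel's actual heuristic):

```cpp
#include <algorithm>
#include <cstdint>

// Re-chunk a flat workload of num_batches * samples_per_batch samples into
// batches of roughly kTargetSamplesPerBatch each; the last batch may be short.
void AdjustBatching(int32_t* num_batches, int32_t* samples_per_batch) {
  constexpr int32_t kTargetSamplesPerBatch = 1024;  // illustrative constant only
  const int32_t size = *num_batches * *samples_per_batch;
  if (size == 0) return;
  const int32_t adjusted_samples = std::min(size, kTargetSamplesPerBatch);
  // Round up so every sample lands in some batch.
  const int32_t adjusted_batches = (size + adjusted_samples - 1) / adjusted_samples;
  *num_batches = adjusted_batches;
  *samples_per_batch = adjusted_samples;
}
```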
parameterized_truncated_normal_op_gpu.cu.cc
51 TruncatedNormalKernel(random::PhiloxRandom gen, T* data, int64 num_batches, argument
195 void operator()(OpKernelContext* ctx, const GPUDevice& d, int64 num_batches,
207 gen, output.data(), num_batches, samples_per_batch, num_elements,
/external/libxkbcommon/xkbcommon/src/x11/
util.c
162 const size_t num_batches = ROUNDUP(count, SIZE) / SIZE; local
165 for (size_t batch = 0; batch < num_batches; batch++) {
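util.c rounds the item count up to a multiple of the batch size, so the final batch may be partial. A C++ sketch of the same pattern with ROUNDUP written out explicitly (kBatchSize stands in for SIZE, and BatchRanges is a hypothetical helper):

```cpp
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Stand-in for SIZE in util.c.
constexpr size_t kBatchSize = 128;

// Split `count` items into [begin, end) ranges: ROUNDUP(count, kBatchSize) / kBatchSize
// batches in total, the last one possibly partial.
std::vector<std::pair<size_t, size_t>> BatchRanges(size_t count) {
  const size_t num_batches = (count + kBatchSize - 1) / kBatchSize;
  std::vector<std::pair<size_t, size_t>> ranges;
  for (size_t batch = 0; batch < num_batches; ++batch) {
    const size_t begin = batch * kBatchSize;
    const size_t end = std::min(begin + kBatchSize, count);
    ranges.emplace_back(begin, end);
  }
  return ranges;
}
```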
/external/tensorflow/tensorflow/contrib/eager/python/examples/spinn/
data.py
316 def num_batches(self, batch_size): member in class:SnliData
/external/tensorflow/tensorflow/contrib/lite/kernels/
basic_rnn_test.cc
174 int num_batches() { return batches_; } function in class:tflite::__anon25923::RNNOpModel
239 (rnn.input_size() * rnn.num_batches());
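The expression on line 239 divides the total number of input floats by input_size() * num_batches(): one time step's data covers every batch at once, and the quotient is the sequence length. A sketch of that arithmetic; the [steps][batches][input_size] layout is my assumption, not something the snippet states:

```cpp
#include <cstddef>
#include <vector>

// Number of time steps in a flat input assumed to be laid out as
// [sequence_steps][num_batches][input_size].
size_t SequenceSteps(const std::vector<float>& flat_input,
                     size_t num_batches, size_t input_size) {
  return flat_input.size() / (input_size * num_batches);
}

// Flat offset of the input_size-wide slice for a given (step, batch) pair.
size_t SliceOffset(size_t step, size_t batch,
                   size_t num_batches, size_t input_size) {
  return (step * num_batches + batch) * input_size;
}
```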
fully_connected_test.cc
157 int num_batches() { return batches_; } function in class:tflite::__anon25933::BaseFullyConnectedOpModel
348 (m.input_size() * m.num_batches());
svdf_test.cc
183 int num_batches() { return batches_; } function in class:tflite::__anon25959::SVDFOpModel
221 const int svdf_num_batches = svdf.num_batches();
281 const int svdf_num_batches = svdf.num_batches();
unidirectional_sequence_rnn_test.cc
187 int num_batches() { return batches_; } function in class:tflite::__anon25965::UnidirectionalRNNOpModel
bidirectional_sequence_rnn_test.cc
741 int num_batches() { return batches_; } function in class:tflite::__anon25925::BidirectionalRNNOpModel
optional_tensor_test.cc
209 int num_batches() { return n_batch_; } function in class:tflite::__anon25946::LSTMOpModel
lstm_test.cc
207 int num_batches() { return n_batch_; } function in class:tflite::__anon25943::LSTMOpModel
unidirectional_sequence_lstm_test.cc
211 int num_batches() { return n_batch_; } function in class:tflite::__anon25964::UnidirectionalLSTMOpModel

Completed in 354 milliseconds