Searched refs: num_elements (Results 26 - 50 of 335), sorted by relevance

/external/tensorflow/tensorflow/core/framework/
allocator.h
110 T* Allocate(size_t num_elements) { argument
111 return Allocate<T>(num_elements, AllocationAttributes());
115 T* Allocate(size_t num_elements, argument
120 if (num_elements > (std::numeric_limits<size_t>::max() / sizeof(T))) {
124 void* p = AllocateRaw(kAllocatorAlignment, sizeof(T) * num_elements,
127 if (typed_p) RunCtor<T>(typed_p, num_elements);
132 void Deallocate(T* ptr, size_t num_elements) { argument
134 RunDtor<T>(ptr, num_elements);
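
The Allocate/Deallocate pair above guards the byte-count multiplication against size_t overflow before touching the raw allocator. A minimal standalone sketch of the same guard, with malloc standing in for TensorFlow's AllocateRaw and the RunCtor/RunDtor constructor handling omitted:

    #include <cstddef>
    #include <cstdlib>
    #include <limits>

    // Refuses the allocation instead of silently wrapping when
    // num_elements * sizeof(T) would overflow size_t.
    template <typename T>
    T* CheckedAllocate(size_t num_elements) {
      if (num_elements > std::numeric_limits<size_t>::max() / sizeof(T)) {
        return nullptr;  // requested byte count does not fit in size_t
      }
      return static_cast<T*>(malloc(sizeof(T) * num_elements));
    }
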
tensor_shape.cc
60 int64 num_elements = 1;
65 num_elements = -1;
66 } else if (!kIsPartial || num_elements >= 0) {
67 num_elements = MultiplyWithoutOverflow(num_elements, d.size());
68 if (num_elements < 0) return false;
86 int64 num_elements = 1; local
103 num_elements = -1;
104 } else if (!kIsPartial || num_elements >= 0) {
105 num_elements
[all...]
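
tensor_shape.cc accumulates the element count with MultiplyWithoutOverflow, using -1 as the sentinel for an unknown (partial) dimension. A hedged sketch of that accumulation, with the GCC/Clang builtin __builtin_mul_overflow standing in for TensorFlow's helper:

    #include <cstdint>
    #include <vector>

    // A dimension of -1 is unknown; the element count then becomes -1 too.
    // Returns false if the running product overflows int64_t.
    bool NumElements(const std::vector<int64_t>& dims, int64_t* num_elements) {
      *num_elements = 1;
      for (int64_t d : dims) {
        if (d < 0) {
          *num_elements = -1;  // unknown dimension: count is unknown
        } else if (*num_elements >= 0) {
          if (__builtin_mul_overflow(*num_elements, d, num_elements)) {
            return false;  // MultiplyWithoutOverflow would report this as < 0
          }
        }
      }
      return true;
    }
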
/external/libcxx/test/support/
unique_ptr_test_helper.h
39 newValue(int num_elements) { argument
40 assert(num_elements == 1);
47 newValue(int num_elements) { argument
49 assert(num_elements >= 1);
50 return new VT[num_elements];
/external/tensorflow/tensorflow/compiler/xla/
sparse_index_array.h
128 int64 num_elements = index_count(); local
129 CHECK_EQ(values.size(), num_elements);
131 sort_order.reserve(num_elements);
132 for (int64 i = 0; i < num_elements; ++i) {
143 for (int64 i = 0; i < num_elements; ++i) {
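
The snippet reserves a sort_order vector of num_elements indices and then walks it, which is the usual sort-a-permutation idiom: order indices by key, then gather the parallel arrays through them. A generic sketch of that idiom (SortByKeys is a hypothetical helper, not the XLA API):

    #include <algorithm>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    // Reorders `values` by `keys` by sorting a permutation vector
    // instead of the data itself.
    template <typename K, typename V>
    void SortByKeys(const std::vector<K>& keys, std::vector<V>* values) {
      const int64_t num_elements = static_cast<int64_t>(keys.size());
      std::vector<int64_t> sort_order(num_elements);
      std::iota(sort_order.begin(), sort_order.end(), 0);
      std::sort(sort_order.begin(), sort_order.end(),
                [&](int64_t a, int64_t b) { return keys[a] < keys[b]; });
      std::vector<V> sorted;
      sorted.reserve(num_elements);
      for (int64_t i = 0; i < num_elements; ++i) {
        sorted.push_back((*values)[sort_order[i]]);
      }
      *values = std::move(sorted);
    }
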
/external/tensorflow/tensorflow/core/util/
tensor_slice_set.cc
57 result_shape.num_elements()};
87 int64 total_size = target_shape.num_elements();
99 overlap_size += inter_shape.num_elements();
144 int64 total_size = target_shape.num_elements();
156 overlap_size += inter_shape.num_elements();
example_proto_fast_parsing.cc
97 bool GetNumElementsInBytesList(int* num_elements) { argument
104 *num_elements = 0;
110 ++*num_elements;
451 LimitedArraySlice(T* begin, size_t num_elements) argument
452 : current_(begin), end_(begin + num_elements) {}
569 const std::size_t num_elements = config.dense[d].elements_per_stride; local
570 const std::size_t offset = example_index * num_elements;
584 LimitedArraySlice<int64> slice(out_p, num_elements);
587 return shape_error(num_elements - slice.EndDistance(), "int64");
593 LimitedArraySlice<float> slice(out_p, num_elements);
615 const std::size_t num_elements = config.dense[d].elements_per_stride; local
738 const std::size_t num_elements = in.shape().num_elements(); local
823 FillAndCopyVarLen( const int d, const size_t num_elements, const size_t num_elements_per_minibatch, const Config& config, const std::vector<std::vector<SparseBuffer>>& varlen_dense_buffers, Tensor* values) argument
1256 const std::size_t num_elements = config.dense[d].elements_per_stride; local
1294 size_t num_elements; local
[all...]
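
example_proto_fast_parsing.cc writes parsed values through a LimitedArraySlice: a small bounded cursor that never writes past begin + num_elements and reports overflow through EndDistance(), which the error path above uses to size its message. A self-contained sketch of the same idea (behavior inferred from the snippet, not copied from the TF source):

    #include <cstddef>

    // Bounded output cursor: writes past the end are counted, not stored,
    // so the caller can tell how many elements overflowed.
    template <typename T>
    class LimitedArraySlice {
     public:
      LimitedArraySlice(T* begin, size_t num_elements)
          : current_(begin), end_(begin + num_elements) {}
      void push_back(T value) {
        if (current_ < end_) *current_ = value;
        ++current_;  // keep counting even once full
      }
      // Remaining capacity; negative once the slice has overflowed.
      ptrdiff_t EndDistance() const { return end_ - current_; }
     private:
      T* current_;
      T* end_;
    };
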
tensor_slice_writer.cc
179 Status TensorSliceWriter::SaveData(const string* data, int64 num_elements, argument
182 (num_elements * MaxBytesPerElement(DT_INT32));
183 for (int64 i = 0; i < num_elements; ++i) {
191 Fill(data, num_elements, ss->mutable_data());
/external/tensorflow/tensorflow/contrib/lite/kernels/
embedding_lookup_sparse.cc
116 void FinalizeAggregation(TfLiteCombinerType combiner, int num_elements, argument
120 if (combiner != kTfLiteCombinerTypeSum && num_elements > 0) {
185 int num_elements = 0; local
208 FinalizeAggregation(params->combiner, num_elements, current_total_weight,
213 num_elements = 0;
220 ++num_elements;
232 FinalizeAggregation(params->combiner, num_elements, current_total_weight,
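
FinalizeAggregation above rescales the accumulated embedding only for non-sum combiners and only when num_elements > 0. A sketch under the assumption that MEAN divides by the total weight and SQRTN by the square root of the summed squared weights (the enum and parameter names here are illustrative, not the TFLite API):

    #include <cmath>

    enum class Combiner { kSum, kMean, kSqrtn };

    // Post-scales an accumulated weighted sum: SUM leaves it as-is,
    // MEAN divides by total weight, SQRTN by sqrt(sum of squared weights).
    // Assumes the weights are positive when a division is requested.
    void FinalizeAggregation(Combiner combiner, int num_elements,
                             float total_weight, float squares_weight,
                             int embedding_size, float* output) {
      if (combiner == Combiner::kSum || num_elements == 0) return;
      const float multiplier = (combiner == Combiner::kMean)
                                   ? 1.0f / total_weight
                                   : 1.0f / std::sqrt(squares_weight);
      for (int i = 0; i < embedding_size; ++i) output[i] *= multiplier;
    }
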
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
scatter_nd_op.cc
96 buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 &&
97 updates_shape.num_elements() == 0),
transpose_op.cc
47 OP_REQUIRES(ctx, dims == perm_tensor_shape.num_elements(),
51 perm_tensor_shape.num_elements()));
109 FastBoundsCheck(ctx->InputShape(0).num_elements(),
select_op.cc
60 OP_REQUIRES(ctx, then_shape.dim_size(0) == cond_shape.num_elements(),
64 cond_shape.num_elements()));
/external/tensorflow/tensorflow/core/kernels/data/
map_and_batch_dataset_op.cc
134 int64 num_elements; variable
135 WaitForBatch(batch_index, &num_elements).IgnoreError();
169 int64 num_elements = 0; variable
170 Status status = WaitForBatch(current_batch_index_, &num_elements);
171 if (num_elements == 0) {
179 if (num_elements < dataset()->batch_size_) {
185 component_shape.set_dim(0, num_elements);
189 CopyPartialBatch(&component, output[i], num_elements));
225 int64 num_elements) {
231 for (size_t i = 0; i < num_elements;
224 CopyPartialBatch(Tensor* output, const Tensor& value, int64 num_elements) argument
[all...]
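
When the final batch comes up short (num_elements < batch_size), the iterator above shrinks dim 0 of each output component to num_elements and copies only that prefix. A flattened, single-type sketch of such a prefix copy (the real CopyPartialBatch slices Tensors element by element and is dispatched over dtypes):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Copies the first num_elements rows of a row-major buffer with
    // row_size values per row into `output`, which holds exactly that many.
    void CopyPartialBatch(float* output, const std::vector<float>& value,
                          int64_t num_elements, int64_t row_size) {
      std::memcpy(output, value.data(),
                  sizeof(float) * static_cast<size_t>(num_elements * row_size));
    }
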
/external/mesa3d/src/util/
slab.h
53 unsigned num_elements; member in struct:slab_parent_pool
slab.c
114 parent->num_elements = num_items;
148 p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);
150 for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
178 pool->parent->num_elements * pool->parent->element_size);
183 for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
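
Each slab page is carved into parent->num_elements slots of element_size bytes, and the loops above walk those slots to initialize or release them. A compact sketch of the page-initialization idea with simplified types (assumes element_size is at least sizeof(FreeItem) and a multiple of its alignment):

    #include <cstddef>
    #include <cstdlib>

    struct FreeItem { FreeItem* next; };

    // Carves one malloc'd page into num_elements fixed-size slots and
    // links every slot onto an intrusive free list; returns its head.
    FreeItem* InitSlabPage(size_t num_elements, size_t element_size,
                           void** page_out) {
      char* page = static_cast<char*>(malloc(num_elements * element_size));
      FreeItem* free_list = nullptr;
      for (size_t i = 0; i < num_elements; ++i) {
        FreeItem* item = reinterpret_cast<FreeItem*>(page + i * element_size);
        item->next = free_list;
        free_list = item;
      }
      *page_out = page;
      return free_list;
    }
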
/external/tensorflow/tensorflow/contrib/boosted_trees/kernels/
split_handler_ops.cc
159 int32 num_elements = partition_boundaries.size() - 1; variable
163 num_elements = 0;
169 TensorShape({num_elements}),
177 context, context->allocate_output("gains", TensorShape({num_elements}),
184 "split_infos", TensorShape({num_elements}),
188 for (int root_idx = 0; root_idx < num_elements; ++root_idx) {
334 int num_elements = non_empty_partitions.size(); variable
338 TensorShape({num_elements}),
346 context, context->allocate_output("gains", TensorShape({num_elements}),
353 "split_infos", TensorShape({num_elements}),
578 int num_elements = non_empty_partitions.size(); variable
[all...]
/external/tensorflow/tensorflow/core/kernels/
dequantize_op.cc
82 const int64 num_elements = input.NumElements(); variable
83 for (int i = 0; i < num_elements; ++i) {
118 const int64 num_elements = input.NumElements(); variable
119 for (int i = 0; i < num_elements; ++i) {
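
Both loops above visit all num_elements values and map each quantized input back to float. A sketch of the per-element affine transform, assuming the common scale/zero-point convention (the actual op supports several quantization modes):

    #include <cstdint>

    // Affine dequantization: shift by the zero point, then scale to float.
    void Dequantize(const uint8_t* input, int64_t num_elements, float scale,
                    int32_t zero_point, float* output) {
      for (int64_t i = 0; i < num_elements; ++i) {
        output[i] = scale * (static_cast<int32_t>(input[i]) - zero_point);
      }
    }
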
quantized_reshape_op_test.cc
54 for (int i = 0; i < input.shape().num_elements(); ++i) {
assign_op.h
90 old_lhs.shape().num_elements() == rhs.shape().num_elements()) {
quantized_mul_op.cc
38 int32 full_input_offset, int64 num_elements, T scalar_input,
42 for (int i = 0; i < num_elements; ++i) {
53 int32 full_input_offset, int64 num_elements,
62 for (i = 0; i < (num_elements - 15); i += 16) {
106 for (; i < num_elements; ++i) {
115 const T* y_data, int32 offset_y, int64 num_elements,
117 for (int i = 0; i < num_elements; ++i) {
128 int64 num_elements, qint32* output) {
133 for (i = 0; i < (num_elements - 15); i += 16) {
182 for (; i < num_elements;
37 ScalarMultiply(OpKernelContext* context, const T* full_input, int32 full_input_offset, int64 num_elements, T scalar_input, int32 scalar_input_offset, Toutput* output) argument
51 ScalarMultiply(OpKernelContext* context, const quint8* full_input, int32 full_input_offset, int64 num_elements, quint8 scalar_input, int32 scalar_input_offset, qint32* output) argument
114 VectorMultiply(OpKernelContext* context, const T* x_data, int32 offset_x, const T* y_data, int32 offset_y, int64 num_elements, Toutput* output) argument
125 VectorMultiply(OpKernelContext* context, const quint8* x_data, int32 offset_x, const quint8* y_data, int32 offset_y, int64 num_elements, qint32* output) argument
[all...]
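
quantized_mul_op.cc multiplies in the integer domain after removing each operand's offset; the vectorized path peels 16 elements per iteration and a scalar tail loop (the truncated `for (; i < num_elements;` above) finishes the remainder. The per-element math of the scalar path, hedged as to the sign convention of the offsets:

    #include <cstdint>

    // Quantized elementwise multiply: remove each input's offset, multiply
    // in int32, and store the raw 32-bit product for later requantization.
    void VectorMultiply(const uint8_t* x, int32_t offset_x, const uint8_t* y,
                        int32_t offset_y, int64_t num_elements,
                        int32_t* output) {
      for (int64_t i = 0; i < num_elements; ++i) {
        output[i] = (static_cast<int32_t>(x[i]) - offset_x) *
                    (static_cast<int32_t>(y[i]) - offset_y);
      }
    }
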
lookup_table_op.cc
329 empty_key_input->template shaped<K, 2>({1, key_shape_.num_elements()}),
345 const int64 num_elements = key.dim_size(0); local
346 const int64 key_size = key_shape_.num_elements();
347 const int64 value_size = value_shape_.num_elements();
348 if (key.NumElements() != num_elements * key_size) {
349 TensorShape expected_shape({num_elements});
355 const auto key_matrix = key.shaped<K, 2>({num_elements, key_size});
356 auto value_matrix = value->shaped<V, 2>({num_elements, value_size});
368 for (int64 i = 0; i < num_elements; ++i) {
408 if (key.NumElements() != key.dim_size(0) * key_shape_.num_elements()) {
505 const int64 num_elements = key.dim_size(0); local
[all...]
roll_op.cc
38 void DoRoll(OpKernelContext* context, const int64 num_elements, argument
88 Shard(worker_threads->num_threads, worker_threads->workers, num_elements,
101 void DoRollWithMemcpy(OpKernelContext* context, const int64 num_elements, argument
215 const int total_work = 2 * num_elements / std::max<int>(dim_range[isd], 1);
249 const int64 num_elements = input.NumElements(); variable
294 DoRollWithMemcpy<T>(context, num_elements, num_dims, dim_size,
298 DoRoll<T>(context, num_elements, num_dims, dim_size, input_flat,
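
DoRoll shards num_elements of work across the thread pool and moves every element to (index + shift) mod dim_size along each rolled axis. A single-axis sketch of the index math, handling negative shifts:

    #include <cstdint>
    #include <vector>

    // Rolls a 1-D array right by `shift` (negative shifts roll left):
    // output[(i + shift) mod n] = input[i].
    std::vector<float> Roll1D(const std::vector<float>& input, int64_t shift) {
      const int64_t n = static_cast<int64_t>(input.size());
      std::vector<float> output(n);
      if (n == 0) return output;
      for (int64_t i = 0; i < n; ++i) {
        output[((i + shift) % n + n) % n] = input[i];
      }
      return output;
    }
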
parameterized_truncated_normal_op_gpu.cu.cc
52 int64 samples_per_batch, int64 num_elements,
74 CUDA_1D_KERNEL_LOOP(offset, num_elements) {
196 int64 samples_per_batch, int64 num_elements,
203 const auto config = GetCudaLaunchConfig(num_elements, d);
207 gen, output.data(), num_batches, samples_per_batch, num_elements,
51 TruncatedNormalKernel(random::PhiloxRandom gen, T* data, int64 num_batches, int64 samples_per_batch, int64 num_elements, const T* means, bool single_mean, const T* stddevs, bool single_stddev, const T* minvals, bool single_minval, const T* maxvals, bool single_maxval, int64 kMaxIterations) argument
/external/tensorflow/tensorflow/contrib/data/python/kernel_tests/
unique_dataset_op_test.py
87 def build_dataset(num_elements, unique_elem_range):
88 return dataset_ops.Dataset.range(num_elements).map(
/external/mesa3d/src/gallium/drivers/svga/
svga_state_vs.c
262 unsigned num_elements; local
283 num_elements = 1;
298 dst[num_elements] = ureg_DECL_output(ureg,
301 src[num_elements] = ureg_DECL_vs_input(ureg, num_elements);
302 num_elements++;
309 for (i = 0; i < num_elements; i++) {
/external/mesa3d/src/gallium/state_trackers/va/
picture_vc1.c
34 assert(buf->size >= sizeof(VAPictureParameterBufferVC1) && buf->num_elements == 1);
72 assert(buf->size >= sizeof(VASliceParameterBufferVC1) && buf->num_elements == 1);
73 context->desc.vc1.slice_count += buf->num_elements;

Completed in 1546 milliseconds
