/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

split_op.cc
    57   int32 split_dim = split_dim_orig < 0 ? split_dim_orig + input_shape.dims()
    59   OP_REQUIRES(ctx, 0 <= split_dim && split_dim < input_shape.dims(),
    61   ") <= split_dim < input rank (",
    71   ctx, input_shape.dim_size(split_dim) % num_split == 0,
    74   "dimension, but got split_dim ",
    75   split_dim_orig, " (size = ", input_shape.dim_size(split_dim), ") ",
    80   const int32 slice_size = input_shape.dim_size(split_dim) / num_split;
    99   begin[split_dim] = i * slice_size;
    100  limits[split_dim]
    118  int32 split_dim;
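Both this tf2xla kernel and the core CPU kernel further down enforce the same two preconditions: 0 <= split_dim < rank (after normalizing a negative split_dim by adding the rank, as at line 57) and that the extent of the split dimension is evenly divisible by num_split. A minimal sketch of what those constraints look like from the public Python API; the tensor and sizes are illustrative, not taken from the listed files:

```python
import tensorflow as tf

# A [2, 6] tensor split into 3 pieces along axis 1: 6 % 3 == 0, so the
# kernel's divisibility check passes and each piece has shape [2, 2].
x = tf.reshape(tf.range(12), [2, 6])
parts = tf.split(x, num_or_size_splits=3, axis=1)
print([p.shape.as_list() for p in parts])  # [[2, 2], [2, 2], [2, 2]]

# Asking for 4 pieces instead would trip the OP_REQUIRES check above,
# since 6 is not divisible by 4; likewise axis must satisfy -2 <= axis < 2.
```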
/external/tensorflow/tensorflow/core/kernels/

split_op_test.cc
    29   static Graph* MakeGraph(int split_dim, int num_split,
    33   in_shape.set_dim(split_dim, in_shape.dim_size(split_dim) * num_split);
    36   Tensor split_dim_tensor = test::AsScalar<int32>(split_dim);
    55   auto g = MakeGraph(/* split_dim = */ 0, num_split, {chunk_size}); \
    61   #define BM_SPLIT_2D(split_dim, num_split, chunk_size0, chunk_size1) \
    63   BM_Split_2d_##split_dim##_##num_split##_##chunk_size0##_##chunk_size1( \
    70   num_split, split_dim, chunk_size0, chunk_size1); \
    73   auto g = MakeGraph(split_dim, num_split, {chunk_size0, chunk_size1}); \
    78   BM_Split_2d_##split_dim##

sparse_split_op.cc
    33   const int64 split_dim = context->input(0).scalar<int64>()();
    53   input_shape.dim_size(0) && split_dim < input_shape.vec<int64>().size(),
    55   "Input split_dim should be between 0 and rank (",
    56   input_shape.vec<int64>().size(), "), got ", split_dim));
    60   num_split_ >= 1 && num_split_ <= input_shape.vec<int64>()(split_dim),
    63   input_shape.vec<int64>()(split_dim), "), got ",
    69   sparse::SparseTensor::Split<T>(sparse_tensor, split_dim, num_split_);
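The sparse kernel performs the analogous bounds checks on split_dim and num_split before delegating to sparse::SparseTensor::Split. A hedged sketch of the corresponding public entry point, assuming the tf.sparse.split API; the indices and values are made up:

```python
import tensorflow as tf

sp = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 3], [1, 2], [1, 5]],
    values=[1, 2, 3, 4],
    dense_shape=[2, 6])

# Split into 2 pieces along split_dim 1; num_split must lie in
# [1, dense_shape[split_dim]] per the OP_REQUIRES at line 60.
pieces = tf.sparse.split(sp_input=sp, num_split=2, axis=1)
for p in pieces:
    print(p.dense_shape.numpy())  # [2 3] printed twice
```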

split_v_op_test.cc
    57   static Graph* MakeGraph(int split_dim, const std::vector<int64>& size_splits,
    63   Tensor split_dim_tensor = test::AsScalar<int32>(split_dim);
    83   auto g = MakeGraph(/* split_dim = */ 0, \
    91   #define BM_SPLITV_2D(split_dim, num_split, total_size0, total_size1) \
    93   BM_SplitV_2d_##split_dim##_##num_split##_##total_size0##_##total_size1( \
    101  num_split, split_dim, total_size0, total_size1); \
    105  split_dim, \
    106  GenerateRandomIntsWithSum(total_size_vec[split_dim], num_split), \
    112  BM_SplitV_2d_##split_dim##_##num_split##_##total_size0##_##total_size1);

split_op.cc
    53   const int32 split_dim =
    58   context, 0 <= split_dim && split_dim < input_shape.dims(),
    60   ") <= split_dim < input rank (", input.dims(),
    68   OP_REQUIRES(context, input_shape.dim_size(split_dim) % num_split == 0,
    71   "dimension, but got split_dim ",
    72   split_dim, " (size = ", input_shape.dim_size(split_dim),
    90   if ((split_dim == 0) && IsInnerDimsSizeAligned<T>(input_shape)) {
    103  const TensorShape& input_shape, int32 split_dim) const

split_v_op.cc
    60   const int32 split_dim =
    84   context, 0 <= split_dim && split_dim < input.dims(),
    86   ") <= split_dim < input rank (", input.dims(),
    89   Tlen input_size_split_dim = input_shape.dim_size(split_dim);
    125  "input shape along split_dim exactly if "
    127  "the input along split_dim if not fully "
    143  if ((split_dim == 0) && IsInnerDimsSizeAligned<T>(input_shape)) {
    157  const TensorShape& input_shape, const int32 split_dim) const {
    161  for (int i = 0; i < split_dim;
    195  const int32 split_dim =
    307  const int32 split_dim =
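SplitV takes an explicit size_splits vector instead of a single count; the error strings quoted above say the sizes must match the input shape along split_dim exactly if fully specified, and that one unspecified entry is inferred otherwise. A short illustrative sketch of both cases (shapes chosen for illustration only):

```python
import tensorflow as tf

x = tf.reshape(tf.range(10), [2, 5])

# Fully specified: 2 + 3 == dim_size(split_dim) == 5.
a, b = tf.split(x, num_or_size_splits=[2, 3], axis=1)
print(a.shape.as_list(), b.shape.as_list())  # [2, 2] [2, 3]

# One -1 entry: SplitV infers the missing size as 5 - 1 = 4.
c, d = tf.split(x, num_or_size_splits=[1, -1], axis=1)
print(c.shape.as_list(), d.shape.as_list())  # [2, 1] [2, 4]
```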
/external/tensorflow/tensorflow/python/kernel_tests/

split_op_test.py
    74   # the split_dim will be split, but we do know the axis
    128  split_dim = np.random.randint(-5, 5)
    134  shape[split_dim] = np.sum(size_splits)
    137  result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
    141  slices[split_dim] = slice(offset, offset + size_splits[i])
    160  split_dim = 1
    163  result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
    167  slices[split_dim] = slice(offset, offset + size_splits[i])
    265  split_dim = np.random.randint(-5, 5)
    270  shape[split_dim]
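The Python test above draws split_dim from np.random.randint(-5, 5), so it also covers negative axes, which the kernels normalize by adding the input rank (see line 57 of the tf2xla kernel). A small sketch of that behavior; the shape is arbitrary:

```python
import tensorflow as tf

x = tf.zeros([3, 4, 8])

# axis=-1 is normalized to -1 + rank == 2, so the size-8 dimension is split.
parts = tf.split(x, num_or_size_splits=4, axis=-1)
print([p.shape.as_list() for p in parts])  # four tensors of shape [3, 4, 2]
```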
/external/tensorflow/tensorflow/python/eager/

ops_test.py
    81   split_dim = constant_op.constant(1)
    83   x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)
    104  split_dim = constant_op.constant(1)
    106  result = array_ops.split(value, 1, axis=split_dim)
    128  split_dim = constant_op.constant(1, dtype=dtypes.int64)
    134  split_dim, indices, values, shape, num_split=2)

core_test.py
    449  split_dim = 1
    454  inputs=[constant_op.constant(split_dim),
/external/tensorflow/tensorflow/core/util/sparse/

sparse_tensor.h
    166  // isn't an integer multiple of split_dim, we add one extra dimension for
    170  const int split_dim,
    496  const int split_dim,
    514  const int split_dim_size = input_tensor.shape()[split_dim];
    520  CHECK(split_dim >= 0 && split_dim < num_dim) << "num_dim must be in "
    526  const int dim = input_tensor.indices().matrix<int64>()(i, split_dim);
    541  output_shapes[i].set_dim(split_dim, size);
    546  const int dim = input_indices_t(i, split_dim);
    553  (j == split_dim)
    495  Split(const SparseTensor& input_tensor, const int split_dim, const int num_split)
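Unlike the dense Split kernel, SparseTensor::Split does not require the extent along split_dim to divide evenly; the comment at line 166 says the remainder is absorbed by giving the leading slices one extra element. A hedged illustration of the resulting slice shapes, assuming the documented tf.sparse.split behavior; the indices and values are made up:

```python
import tensorflow as tf

# Dense extent 7 along split_dim 1; 7 is not a multiple of num_split 3.
sp = tf.sparse.SparseTensor(indices=[[0, 0], [0, 6], [1, 3]],
                            values=[1, 2, 3],
                            dense_shape=[2, 7])

# The first 7 % 3 == 1 slice absorbs the extra element, so the pieces
# come out with dense shapes [2, 3], [2, 2], [2, 2].
pieces = tf.sparse.split(sp_input=sp, num_split=3, axis=1)
print([p.dense_shape.numpy().tolist() for p in pieces])
# [[2, 3], [2, 2], [2, 2]]
```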
/external/tensorflow/tensorflow/cc/framework/

gradient_checker_test.cc
    129  auto split_dim = Const(scope, 1, {});
    130  auto y = Split(scope, split_dim, x, /* num_split */ 2);
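The C++ test feeds this Split node to the gradient checker. In Python the same sanity check can be sketched with a GradientTape; the gradient of Split is simply the concatenation of the incoming gradients along split_dim. The variable shape and scale factors below are illustrative:

```python
import tensorflow as tf

x = tf.Variable(tf.random.normal([4, 6]))
with tf.GradientTape() as tape:
    a, b = tf.split(x, num_or_size_splits=2, axis=1)  # split_dim = 1
    loss = 2.0 * tf.reduce_sum(a) + 3.0 * tf.reduce_sum(b)

# dloss/dx is 2 over the columns routed to `a` and 3 over those routed to `b`.
grad = tape.gradient(loss, x)
print(grad[0].numpy())  # [2. 2. 2. 3. 3. 3.]
```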
/external/tensorflow/tensorflow/core/ops/

array_ops_test.cc
    1093  // No value for split_dim and no input.
    1095  .Input("split_dim", 0, DT_INT32)
    1103  // split_dim is unknown but other inputs are known.
    1106  // split_dim is known.
    1107  Tensor split_dim = test::AsTensor<int32>({1, 2});
    1108  op.input_tensors[0] = &split_dim;
    1110  split_dim = test::AsScalar<int32>(1);
    1118  // split_dim too large.
    1119  split_dim = test::AsScalar<int32>(3);
    1124  // Negative split_dim

array_ops.cc
    403  .Input("split_dim: int32")
    422  int64 split_dim = c->Value(split_dimension);
    423  TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input));
    426  c->Divide(c->Dim(input, split_dim), num_split,
    430  c->ReplaceDim(input, split_dim, split_dim_size, &out));
    439  .Input("split_dim: int32")
    482  int64 split_dim = c->Value(split_dimension);
    483  TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input));
    502  TF_RETURN_IF_ERROR(c->ReplaceDim(input, split_dim,
    512  auto split_dim_size = c->Value(c->Dim(input, split_dim));
/external/tensorflow/tensorflow/cc/gradients/

array_grad_test.cc
    105  auto split_dim = Const(scope_, 1, {});
    106  auto y = Split(scope_, split_dim, x, /* num_split */ 2);
/external/tensorflow/tensorflow/python/ops/

sparse_ops.py
    624  split_dim=None):
    652  split_dim: Deprecated old name for axis.
    659  ValueError: If the deprecated `split_dim` and `axis` are both non None.
    669  axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
    670  split_dim)
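Here split_dim survives only as a deprecated alias for axis, resolved through deprecation.deprecated_argument_lookup. A standalone sketch of that lookup pattern, written for illustration rather than copied from TensorFlow's helper:

```python
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
    """Prefer the new keyword, fall back to the deprecated one, reject both."""
    if old_value is not None:
        if new_value is not None:
            raise ValueError("Cannot specify both '%s' and its deprecated "
                             "alias '%s'." % (new_name, old_name))
        return old_value
    return new_value

# Old call sites that still pass split_dim keep working:
axis = deprecated_argument_lookup("axis", None, "split_dim", 1)  # -> 1
# Passing both raises ValueError, matching the docstring entry at line 659.
```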
/external/tensorflow/tensorflow/contrib/lite/toco/graph_transformations/

propagate_fixed_sizes.cc
    675  const int split_dim = input_shape.dims(axis);
    676  CHECK_EQ(split_dim % op->num_split, 0);
    677  const int split_depth = split_dim / op->num_split;
/external/tensorflow/tensorflow/contrib/lite/toco/

export_tensorflow.cc
    1007  const int split_dim = split_dim_data[0];
    1008  CreateDummyConcatDimTensorConst(src_op.inputs[0], split_dim,
    1292  string split_dim_output = base + "split/split_dim";