Searched refs: input_rank (Results 1 - 25 of 29) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
sparse_concat_op.cc
77 const int input_rank = input_shape.dims(); variable
79 ? input_rank + concat_dim_attr_
81 OP_REQUIRES(context, concat_dim >= 0 && concat_dim < input_rank,
83 -input_rank, ", ", input_rank,
88 context, current_shape.dims() == input_rank,
90 "Ranks of all input tensors must match: expected ", input_rank,
92 for (int j = 0; j < input_rank; ++j) {
112 gtl::InlinedVector<int64, 8> std_order(input_rank);
116 concat_order.reserve(input_rank);
[all...]
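
The sparse_concat_op.cc snippet above normalizes a possibly negative concat_dim against input_rank and requires every input tensor to have the same rank. A minimal Python sketch of that validation logic (names are illustrative, not TensorFlow's API):

    def validate_concat_dim(concat_dim_attr, input_shapes):
        # Rank is taken from the first input; a negative attribute is wrapped once.
        input_rank = len(input_shapes[0])
        concat_dim = concat_dim_attr + input_rank if concat_dim_attr < 0 else concat_dim_attr
        if not (0 <= concat_dim < input_rank):
            raise ValueError("Concat dimension must be in range [%d, %d), got %d"
                             % (-input_rank, input_rank, concat_dim_attr))
        for shape in input_shapes:
            if len(shape) != input_rank:
                raise ValueError("Ranks of all input tensors must match: expected %d, got %d"
                                 % (input_rank, len(shape)))
        return concat_dim

    # Example: concat_dim_attr=-1 on rank-2 inputs resolves to axis 1.
    print(validate_concat_dim(-1, [(2, 3), (4, 3)]))  # -> 1
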
linalg_ops_common.cc
118 int input_rank = -1; local
122 input_rank = in.dims();
124 context, input_rank >= 2,
126 " must have rank >= 2, got ", input_rank));
130 for (int dim = 0; dim < input_rank - 2; ++dim) {
135 OP_REQUIRES(context, input_rank == in.dims(),
138 for (int dim = 0; dim < input_rank - 2; ++dim) {
146 const int row_dimension = input_rank - 2;
147 const int col_dimension = input_rank - 1;
reshape_util.cc
50 const int64 input_rank = input_shape_in.NumElements(); local
109 gtl::InlinedVector<int64, 8> input_strides(input_rank);
110 input_strides[input_rank - 1] = 1;
111 for (int d = input_rank - 2; d >= 0; --d) {
130 for (int j = 0; j < input_rank; ++j) {
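
The reshape_util.cc lines compute row-major strides for a dense shape (the last stride is 1, each earlier stride is the product of the dimensions to its right), which lets a flat element index be converted back into multi-dimensional coordinates. A rough Python equivalent, for illustration only:

    def row_major_strides(shape):
        input_rank = len(shape)
        strides = [0] * input_rank
        strides[input_rank - 1] = 1
        for d in range(input_rank - 2, -1, -1):
            strides[d] = strides[d + 1] * shape[d + 1]
        return strides

    def flat_to_coords(flat_index, strides):
        coords = []
        for s in strides:
            coords.append(flat_index // s)
            flat_index %= s
        return coords

    strides = row_major_strides((2, 3, 4))       # -> [12, 4, 1]
    print(strides, flat_to_coords(17, strides))  # element 17 -> [1, 1, 1]
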
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
depthtospace_op.cc
45 int input_rank = input_tensor_shape.dims(); variable
47 OP_REQUIRES(ctx, kRequiredDims == input_rank,
49 "; got: ", input_rank));
56 int feature_dim = GetTensorFeatureDimIndex(input_rank, data_format_);
57 int num_spatial_dims = GetTensorSpatialDims(input_rank, data_format_);
62 reshaped_shape.reserve(input_rank);
63 transpose_order.reserve(input_rank);
64 output_shape.reserve(input_rank);
spacetodepth_op.cc
45 int input_rank = input_tensor_shape.dims(); variable
47 OP_REQUIRES(ctx, kRequiredDims == input_rank,
49 "; got ", input_rank));
56 int feature_dim = GetTensorFeatureDimIndex(input_rank, data_format_);
57 int num_spatial_dims = GetTensorSpatialDims(input_rank, data_format_);
62 reshaped_shape.reserve(input_rank);
63 transpose_order.reserve(input_rank);
64 output_shape.reserve(input_rank);
batchtospace_op.cc
28 const int input_rank = input_tensor_shape.dims(); local
34 ctx, input_rank >= 1 + block_rank,
36 " instead of ", input_rank));
71 std::vector<int64> reshaped_shape(input_rank + block_rank);
106 std::vector<int64> reshaped_permuted_shape(input_rank);
126 std::vector<int64> start_indices(input_rank, 0);
128 std::vector<int64> strides(input_rank, 1);
spacetobatch_op.cc
28 const int input_rank = input_tensor_shape.dims(); local
34 ctx, input_rank >= 1 + block_rank,
36 " instead of ", input_rank));
89 std::vector<int64> reshaped_padded_shape(input_rank + block_rank);
137 std::vector<int64> output_shape(input_rank);
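
Both batchtospace_op.cc and spacetobatch_op.cc require input_rank >= 1 + block_rank and build an intermediate tensor of rank input_rank + block_rank by splitting each spatial dimension into (dim / block_size, block_size) before permuting the block factors into (or out of) the batch dimension. A small NumPy sketch of the space-to-batch direction for a single spatial dimension, assuming no padding and illustrative semantics only:

    import numpy as np

    def space_to_batch_1d(x, block_size):
        # x has shape [batch, length, depth]; length is assumed divisible by block_size.
        batch, length, depth = x.shape
        # Split the spatial dim: rank grows from 3 to 4 (input_rank + block_rank).
        reshaped = x.reshape(batch, length // block_size, block_size, depth)
        # Move the block factor in front of batch, then fold it into batch.
        permuted = reshaped.transpose(2, 0, 1, 3)
        return permuted.reshape(batch * block_size, length // block_size, depth)

    x = np.arange(2 * 6 * 1).reshape(2, 6, 1)
    print(space_to_batch_1d(x, 2).shape)  # -> (4, 3, 1)
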
/external/tensorflow/tensorflow/contrib/model_pruning/python/layers/
layers.py
198 input_rank = inputs.get_shape().ndims
200 if input_rank == 3:
202 input_rank)
203 elif input_rank == 4:
205 elif input_rank == 5:
207 input_rank)
210 input_rank)
/external/tensorflow/tensorflow/contrib/layers/python/layers/
feature_column_ops.py
45 1. If `output_rank > input_rank + 1`, raise a `ValueError`.
46 2. If `output_rank == input_rank + 1`, expand the tensor by one dimension.
47 3. If `output_rank == input_rank`, do nothing.
48 4. If `output_rank < input_rank`, flatten the inner dimensions of the tensor.
57 ValueError: if `output_rank > input_rank + 1` for the input tensor.
59 input_rank = tensor.get_shape().ndims
61 if input_rank is None and isinstance(tensor, sparse_tensor_py.SparseTensor):
63 input_rank = tensor.dense_shape.get_shape().as_list()[0]
65 if input_rank is None:
69 if output_rank > input_rank
[all...]
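
The feature_column_ops.py docstring above spells out the rank-adjustment contract: error if output_rank > input_rank + 1, expand by one dimension if output_rank == input_rank + 1, pass through if the ranks match, and flatten the inner dimensions if output_rank < input_rank. A hedged NumPy approximation of those four rules (not the library's implementation):

    import numpy as np

    def reshape_to_rank(tensor, output_rank):
        input_rank = tensor.ndim
        if output_rank > input_rank + 1:
            raise ValueError("Rank of tensor must be at least %d, got %d"
                             % (output_rank - 1, input_rank))
        if output_rank == input_rank + 1:
            return np.expand_dims(tensor, -1)   # add one trailing dimension
        if output_rank == input_rank:
            return tensor                       # nothing to do
        # output_rank < input_rank: keep the leading dims, flatten the rest.
        leading = tensor.shape[:output_rank - 1]
        return tensor.reshape(leading + (-1,))

    x = np.zeros((2, 3, 4))
    print(reshape_to_rank(x, 4).shape)  # (2, 3, 4, 1)
    print(reshape_to_rank(x, 2).shape)  # (2, 12)
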
layers.py
1017 input_rank = inputs.get_shape().ndims
1019 if input_rank == 3:
1021 elif input_rank == 4:
1023 elif input_rank == 5:
1027 input_rank)
2370 input_rank = inputs.get_shape().ndims
2371 if input_rank is None:
2373 if input_rank < 3:
2375 num_spatial_dims = input_rank - 2
2927 input_rank
[all...]
feature_column.py
1633 1. If `output_rank > input_rank + 1` raise a `ValueError`.
1634 2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
1636 3. If `output_rank == input_rank`, return `input_tensor`.
1637 4. If `output_rank < input_rank`, flatten the inner dimensions of
1648 ValueError: if `output_rank > input_rank + 1`.
1650 input_rank = input_tensor.get_shape().ndims
1651 if input_rank is not None:
1652 if output_rank > input_rank + 1:
1657 input_rank, output_rank))
1662 if output_rank == input_rank
[all...]
/external/tensorflow/tensorflow/contrib/lite/kernels/
gather.cc
87 const int input_rank = NumDimensions(input); local
90 GetTensorData<data_type>(input), GetTensorDims(input), input_rank, \
/external/tensorflow/tensorflow/python/ops/
spectral_ops.py
109 input_rank = _array_ops.rank(input_tensor)
111 outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
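
The spectral_ops.py hit computes how many leading batch dimensions an FFT input has: outer_dims = max(0, input_rank - fft_rank), i.e. everything except the innermost fft_rank dimensions is treated as batch. A one-line illustration:

    def outer_dims(input_rank, fft_rank):
        # All dimensions except the innermost fft_rank ones are batch ("outer") dims.
        return max(0, input_rank - fft_rank)

    print(outer_dims(4, 2))  # a rank-4 input to a 2-D FFT has 2 batch dimensions
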
nn_ops.py
1692 input_rank = array_ops.rank(logits)
1694 logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
1706 output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
1847 input_rank = array_ops.rank(precise_logits)
1862 precise_logits = _move_dim_to_end(precise_logits, dim, input_rank)
1863 labels = _move_dim_to_end(labels, dim, input_rank)
1879 [math_ops.subtract(input_rank, 1)])
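
The nn_ops.py hits use the usual trick for applying an op that only works on the last axis to an arbitrary axis: move the requested dim to position input_rank - 1, apply the op, then move it back. A rough NumPy illustration of the same idea (not TensorFlow's code):

    import numpy as np

    def softmax_along(x, dim):
        input_rank = x.ndim
        # Move the requested axis to the end, as the snippet does with _swap_axis.
        moved = np.moveaxis(x, dim, input_rank - 1)
        e = np.exp(moved - moved.max(axis=-1, keepdims=True))
        out = e / e.sum(axis=-1, keepdims=True)
        # Move the axis back so the output shape matches the input.
        return np.moveaxis(out, input_rank - 1, dim)

    x = np.random.rand(2, 5, 3)
    print(softmax_along(x, 1).shape)        # (2, 5, 3)
    print(softmax_along(x, 1).sum(axis=1))  # each slice sums to 1
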
math_ops.py
2544 input_rank = array_ops.size(input_shape) # 4
2545 axes = (axes + input_rank) % input_rank
2549 range(input_rank), # [0, 1, 2, 3]
array_grad.py
246 input_rank = array_ops.rank(input_vec)
249 shape = array_ops.stack([input_rank, 1])
/external/tensorflow/tensorflow/core/framework/
shape_inference.cc
857 int idx, int input_rank, DimensionHandle* out) {
866 if (input_rank < 0) {
869 } else if (val + input_rank < 0) {
871 val, " must be in range [-", input_rank,
872 ", ", input_rank, ")");
874 val += input_rank;
876 } else if (input_rank >= 0 && val >= input_rank) {
878 val, " must be in range [-", input_rank,
879 ", ", input_rank, ")");
856 MakeDimForScalarInputWithNegativeIndexing( int idx, int input_rank, DimensionHandle* out) argument
[all...]
common_shape_fns.cc
1040 const int32 input_rank,
1045 if (reduction_index < -input_rank || reduction_index >= input_rank) {
1048 input_rank, " dimensions.");
1053 wrapped_index += input_rank;
1090 const int32 input_rank = c->Rank(input); local
1094 input_rank, true_indices));
1097 input_rank, true_indices));
1104 for (int i = 0; i < input_rank; ++i) {
1039 ReductionShapeHelper(const Tensor* reduction_indices_t, const int32 input_rank, std::set<int64>& true_indices) argument
shape_inference.h
499 Status MakeDimForScalarInputWithNegativeIndexing(int idx, int input_rank,
/external/tensorflow/tensorflow/core/ops/
math_ops.cc
791 const int32 input_rank = c->Rank(input_shape); local
792 if (input_rank <= 1) {
802 std::vector<DimensionHandle> dims(input_rank - 1);
818 int64 axis = dimension_val < 0 ? dimension_val + input_rank : dimension_val;
819 if (axis < 0 || axis >= input_rank) {
821 "Dimension (", dimension_val, ") must be in the range [", -input_rank,
822 ", ", input_rank, "), where ", input_rank,
828 for (int i = 0; i < input_rank; ++i) {
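
The math_ops.cc shape function above validates a reduction dimension against [-input_rank, input_rank), wraps negative values, and produces an output of rank input_rank - 1 by dropping the reduced dimension. A small sketch of that shape inference, with illustrative names:

    def argmax_output_shape(input_shape, dimension_val):
        input_rank = len(input_shape)
        if input_rank <= 1:
            return ()  # reducing a scalar or vector yields a scalar
        axis = dimension_val + input_rank if dimension_val < 0 else dimension_val
        if axis < 0 or axis >= input_rank:
            raise ValueError("Dimension (%d) must be in the range [%d, %d)"
                             % (dimension_val, -input_rank, input_rank))
        # The output keeps every dimension except the reduced one.
        return tuple(d for i, d in enumerate(input_shape) if i != axis)

    print(argmax_output_shape((2, 3, 4), -1))  # -> (2, 3)
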
array_ops.cc
74 // Paddings is a matrix of [input_rank, 2].
1261 const int32 input_rank = c->Rank(input);
1262 if (batch_dim >= input_rank) {
1264 "batch_dim must be < input rank: ", batch_dim, " vs. ", input_rank);
1266 if (seq_dim >= input_rank) {
1268 "seq_dim must be < input rank: ", seq_dim, " vs. ", input_rank);
1663 const Tensor* paddings_t, int64 input_rank) {
1665 std::vector<DimensionHandle> dims(input_rank);
1666 for (int64 i = 0; i < input_rank; ++i) {
1699 int64 input_rank
1662 MirrorPadKnown(InferenceContext* c, ShapeHandle input, const Tensor* paddings_t, int64 input_rank) argument
[all...]
/external/tensorflow/tensorflow/core/common_runtime/
shape_refiner.cc
488 int input_rank = c->Rank(c->input(0)); local
489 Tensor t(node->output_type(0), TensorShape({input_rank}));
492 for (int i = 0; i < input_rank; i++) {
503 for (int i = 0; i < input_rank; i++) {
516 int32 input_rank = c->Rank(c->input(0)); local
518 t.flat<int32>()(0) = input_rank;
/external/tensorflow/tensorflow/compiler/xla/
shape_util.cc
1271 int64 input_rank = Rank(input_shape);
1293 std::vector<int64> dimension_to_alignment_index(input_rank);
1295 for (int64 i = 0, j = 0; i < input_rank || j < output_rank;) {
1304 if (i == input_rank) {
1320 alignment.push_back({input_rank, output_rank});
1330 for (int64 i = 0; i < input_rank;) {
1359 if (i == input_rank) {
/external/tensorflow/tensorflow/cc/gradients/
math_grad.cc
627 // input_rank = 4
628 auto input_rank = Size(scope, input_shape); local
632 auto axes = Mod(scope, Add(scope, reduction_axes, input_rank), input_rank);
634 // This [0..input_rank) range of integers is used in DynamicStitch to
637 auto input_rank_range = Range(scope, zero, input_rank, one);
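
The math_grad.cc snippet, as its own comments indicate, normalizes the reduction axes with (reduction_axes + input_rank) % input_rank and builds the integer range [0, input_rank) so DynamicStitch can assemble a shape in which the reduced dimensions become 1; the incoming gradient is then broadcast back to the input shape. A NumPy sketch of that gradient shape logic, assuming a sum reduction:

    import numpy as np

    def sum_grad(grad, input_shape, reduction_axes):
        input_rank = len(input_shape)
        # Wrap negative axes exactly as in the snippet: (axes + rank) % rank.
        axes = [(a + input_rank) % input_rank for a in reduction_axes]
        # Shape with the reduced dimensions set to 1 (what DynamicStitch assembles).
        kept_dims = [1 if i in axes else d for i, d in enumerate(input_shape)]
        # Reshape the gradient so it broadcasts, then expand it back to input_shape.
        return np.broadcast_to(grad.reshape(kept_dims), input_shape)

    g = np.ones((2, 4))                        # gradient of reduce_sum over axis -2 of a (2, 3, 4) input
    print(sum_grad(g, (2, 3, 4), [-2]).shape)  # -> (2, 3, 4)
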
/external/tensorflow/tensorflow/contrib/lite/toco/
model.h
1315 int input_rank = 0; member in struct:toco::GatherOperator
