1# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15# Tests for this file live in python/kernel_tests/array_ops_test.py
16"""Support for manipulating tensors.
17
18See the @{$python/array_ops} guide.
19
20@@string_to_number
21@@to_double
22@@to_float
23@@to_bfloat16
24@@to_int32
25@@to_int64
26@@cast
27@@bitcast
28@@saturate_cast
29@@broadcast_dynamic_shape
30@@broadcast_static_shape
31@@shape
32@@shape_n
33@@size
34@@rank
35@@reshape
36@@squeeze
37@@expand_dims
38@@unravel_index
39@@meshgrid
40@@slice
41@@strided_slice
42@@split
43@@tile
44@@pad
45@@concat
46@@stack
47@@parallel_stack
48@@unstack
49@@reverse_sequence
50@@reverse
51@@reverse_v2
52@@transpose
53@@extract_image_patches
54@@space_to_batch_nd
55@@space_to_batch
56@@required_space_to_batch_paddings
57@@batch_to_space_nd
58@@batch_to_space
59@@space_to_depth
60@@depth_to_space
61@@gather
62@@gather_nd
63@@unique_with_counts
64@@scatter_nd
65@@dynamic_partition
66@@dynamic_stitch
67@@boolean_mask
68@@one_hot
69@@sequence_mask
70@@dequantize
71@@quantize
72@@quantize_v2
73@@quantized_concat
74@@setdiff1d
75@@guarantee_const
76@@fake_quant_with_min_max_args
77@@fake_quant_with_min_max_args_gradient
78@@fake_quant_with_min_max_vars
79@@fake_quant_with_min_max_vars_gradient
80@@fake_quant_with_min_max_vars_per_channel
81@@fake_quant_with_min_max_vars_per_channel_gradient
82"""
83
84from __future__ import absolute_import
85from __future__ import division
86from __future__ import print_function
87
88import sys
89
90import numpy as np
91
92from tensorflow.python.eager import context
93from tensorflow.python.framework import common_shapes
94from tensorflow.python.framework import constant_op
95from tensorflow.python.framework import dtypes
96from tensorflow.python.framework import ops
97from tensorflow.python.framework import sparse_tensor
98from tensorflow.python.framework import tensor_shape
99from tensorflow.python.framework import tensor_util
100# 'Constant' gets imported in the module 'array_ops'.
101from tensorflow.python.framework.constant_op import constant
102from tensorflow.python.ops import gen_array_ops
103from tensorflow.python.ops import gen_math_ops
104# go/tf-wildcard-import
105# pylint: disable=wildcard-import
106from tensorflow.python.ops.gen_array_ops import *
107from tensorflow.python.util import deprecation
108from tensorflow.python.util.tf_export import tf_export
109# pylint: enable=wildcard-import
110
111# Used for slicing to specify a new 1 size dimension
112newaxis = None
113tf_export("newaxis").export_constant(__name__, "newaxis")
114
115# We override the 'slice' for the "slice" op, so we keep python's
116# existing 'slice' for later use in this module.
117_BaseSlice = slice
118
119
120@tf_export("identity")
121def identity(input, name=None):  # pylint: disable=redefined-builtin
122  r"""Return a tensor with the same shape and contents as input.
123
124  Args:
125    input: A `Tensor`.
126    name: A name for the operation (optional).
127
128  Returns:
129    A `Tensor`. Has the same type as `input`.
130  """
131  if context.in_graph_mode():
132    return gen_array_ops.identity(input, name=name)
133  else:
134    input = ops.convert_to_tensor(input)
135    in_device = input.device
136    # TODO(ashankar): Does 'identity' need to invoke execution callbacks?
137    if context.context().device_name != in_device:
138      return input._copy()  # pylint: disable=protected-access
139    return input
140
141
142# pylint: disable=redefined-builtin,protected-access
143@tf_export("expand_dims")
144def expand_dims(input, axis=None, name=None, dim=None):
145  """Inserts a dimension of 1 into a tensor's shape.
146
147  Given a tensor `input`, this operation inserts a dimension of 1 at the
148  dimension index `axis` of `input`'s shape. The dimension index `axis` starts
149  at zero; if you specify a negative number for `axis` it is counted backward
150  from the end.
151
152  This operation is useful if you want to add a batch dimension to a single
153  element. For example, if you have a single image of shape `[height, width,
154  channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
155  which will make the shape `[1, height, width, channels]`.
156
157  Other examples:
158
159  ```python
160  # 't' is a tensor of shape [2]
161  tf.shape(tf.expand_dims(t, 0))  # [1, 2]
162  tf.shape(tf.expand_dims(t, 1))  # [2, 1]
163  tf.shape(tf.expand_dims(t, -1))  # [2, 1]
164
165  # 't2' is a tensor of shape [2, 3, 5]
166  tf.shape(tf.expand_dims(t2, 0))  # [1, 2, 3, 5]
167  tf.shape(tf.expand_dims(t2, 2))  # [2, 3, 1, 5]
168  tf.shape(tf.expand_dims(t2, 3))  # [2, 3, 5, 1]
169  ```
170
171  This operation requires that:
172
173  `-1-input.dims() <= dim <= input.dims()`
174
175  This operation is related to `squeeze()`, which removes dimensions of
176  size 1.
177
178  Args:
179    input: A `Tensor`.
180    axis: 0-D (scalar). Specifies the dimension index at which to
181      expand the shape of `input`. Must be in the range
182      `[-rank(input) - 1, rank(input)]`.
183    name: The name of the output `Tensor`.
184    dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
185
186  Returns:
187    A `Tensor` with the same data as `input`, but its shape has an additional
188    dimension of size 1 added.
189
190  Raises:
191    ValueError: if both `dim` and `axis` are specified.
192  """
193  # TODO(aselle): Remove argument dim
194  if dim is not None:
195    if axis is not None:
196      raise ValueError("can't specify both 'dim' and 'axis'")
197    axis = dim
198  return gen_array_ops._expand_dims(input, axis, name)
199
200
201# pylint: enable=redefined-builtin,protected-access
202
203
204# Aliases for some automatically-generated names.
205# pylint: disable=protected-access
206@deprecation.deprecated(
207    "2016-11-30",
208    "This op will be removed after the deprecation date. "
209    "Please switch to tf.setdiff1d().")
210def listdiff(x, y, out_idx=None, name=None):
211  return gen_array_ops._list_diff(x, y, out_idx, name)
212
213
214listdiff.__doc__ = gen_array_ops._list_diff.__doc__ + "\n" + listdiff.__doc__
215
216# pylint: enable=protected-access
217
218
219# pylint: disable=undefined-variable,protected-access
220@tf_export("setdiff1d")
221def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
222  return gen_array_ops._list_diff(x, y, index_dtype, name)
223
224
225setdiff1d.__doc__ = gen_array_ops._list_diff.__doc__
226
227# pylint: enable=protected-access
228
229
230@tf_export("broadcast_dynamic_shape")
231def broadcast_dynamic_shape(shape_x, shape_y):
232  # pylint: disable=protected-access
233  """Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.
234
235  Args:
236    shape_x: A rank 1 integer `Tensor`, representing the shape of x.
237    shape_y: A rank 1 integer `Tensor`, representing the shape of y.
238
239  Returns:
240    A rank 1 integer `Tensor` representing the broadcasted shape.
241  """
242  return gen_array_ops._broadcast_args(shape_x, shape_y)
243  # pylint: enable=protected-access
244
245
246@tf_export("broadcast_static_shape")
247def broadcast_static_shape(shape_x, shape_y):
248  """Returns the broadcasted static shape between `shape_x` and `shape_y`.
249
250  Args:
251    shape_x: A `TensorShape`
252    shape_y: A `TensorShape`
253
254  Returns:
255    A `TensorShape` representing the broadcasted shape.
256
257  Raises:
258    ValueError: If the two shapes can not be broadcasted.
259  """
260  return common_shapes.broadcast_shape(shape_x, shape_y)
261
262
263@tf_export("shape")
264def shape(input, name=None, out_type=dtypes.int32):
265  # pylint: disable=redefined-builtin
266  """Returns the shape of a tensor.
267
268  This operation returns a 1-D integer tensor representing the shape of `input`.
269
270  For example:
271
272  ```python
273  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
274  tf.shape(t)  # [2, 2, 3]
275  ```
276
277  Args:
278    input: A `Tensor` or `SparseTensor`.
279    name: A name for the operation (optional).
280    out_type: (Optional) The specified output type of the operation
281      (`int32` or `int64`). Defaults to `tf.int32`.
282
283  Returns:
284    A `Tensor` of type `out_type`.
285  """
286  return shape_internal(input, name, optimize=True, out_type=out_type)
287
288
289def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
290  # pylint: disable=redefined-builtin
291  """Returns the shape of a tensor.
292
293  Args:
294    input: A `Tensor` or `SparseTensor`.
295    name: A name for the operation (optional).
296    optimize: if true, encode the shape as a constant when possible.
297    out_type: (Optional) The specified output type of the operation
298      (`int32` or `int64`). Defaults to tf.int32.
299
300  Returns:
301    A `Tensor` of type `out_type`.
302
303  """
304  with ops.name_scope(name, "Shape", [input]) as name:
305    if isinstance(input, (sparse_tensor.SparseTensor,
306                          sparse_tensor.SparseTensorValue)):
307      return gen_math_ops.cast(input.dense_shape, out_type)
308    else:
309      if context.in_graph_mode():
310        input_tensor = ops.convert_to_tensor(input)
311        input_shape = input_tensor.get_shape()
312        if optimize and input_shape.is_fully_defined():
313          return constant(input_shape.as_list(), out_type, name=name)
314      return gen_array_ops.shape(input, name=name, out_type=out_type)
315
316
317@tf_export("shape_n")
318def shape_n(input, out_type=dtypes.int32, name=None):
319  # pylint: disable=redefined-builtin
320  """Returns shape of tensors.
321
322  Args:
323    input: A list of at least 1 `Tensor` object with the same type.
324    out_type: The specified output type of the operation
325      (`int32` or `int64`). Defaults to `tf.int32`(optional).
326    name: A name for the operation (optional).
327
328  Returns:
329    A list with the same length as `input` of `Tensor` objects with
330      type `out_type`.
331  """
332
333  output = gen_array_ops.shape_n(input, out_type=out_type, name=name)
334  if context.in_graph_mode():
335    for i, input_tensor in enumerate(input):
336      input_tensor = ops.convert_to_tensor(input_tensor)
337      input_shape = input_tensor.get_shape()
338      if input_shape.is_fully_defined():
339        output[i] = constant(
340            input_shape.as_list(), dtype=out_type, name=name)
341  return output
342
343
344@tf_export("size")
345def size(input, name=None, out_type=dtypes.int32):
346  # pylint: disable=redefined-builtin
347  """Returns the size of a tensor.
348
349  Returns a 0-D `Tensor` representing the number of elements in `input`
350  of type `out_type`. Defaults to tf.int32.
351
352  For example:
353
354  ```python
355  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
356  tf.size(t)  # 12
357  ```
358
359  Args:
360    input: A `Tensor` or `SparseTensor`.
361    name: A name for the operation (optional).
362    out_type: (Optional) The specified non-quantized numeric output type
363      of the operation. Defaults to `tf.int32`.
364
365  Returns:
366    A `Tensor` of type `out_type`. Defaults to `tf.int32`.
367
368  @compatibility(numpy)
369  Equivalent to np.size()
370  @end_compatibility
371  """
372  return size_internal(input, name, optimize=True, out_type=out_type)
373
374
375def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
376  # pylint: disable=redefined-builtin,protected-access
377  """Returns the size of a tensor.
378
379  Args:
380    input: A `Tensor` or `SparseTensor`.
381    name: A name for the operation (optional).
382    optimize: if true, encode the size as a constant when possible.
383    out_type: (Optional) The specified non-quantized numeric output type
384      of the operation. Defaults to `tf.int32`.
385
386  Returns:
387    A `Tensor` of type `out_type`. Defaults to `tf.int32`.
388  """
389  with ops.name_scope(name, "Size", [input]) as name:
390    if isinstance(input, (sparse_tensor.SparseTensor,
391                          sparse_tensor.SparseTensorValue)):
392      return gen_math_ops._prod(
393          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
394    else:
395      input_tensor = ops.convert_to_tensor(input)
396      input_shape = input_tensor.get_shape()
397      if optimize and input_shape.is_fully_defined():
398        return constant(input_shape.num_elements(), out_type, name=name)
399      return gen_array_ops.size(input, name=name, out_type=out_type)
400
401
402@tf_export("rank")
403def rank(input, name=None):
404  # pylint: disable=redefined-builtin
405  """Returns the rank of a tensor.
406
407  Returns a 0-D `int32` `Tensor` representing the rank of `input`.
408
409  For example:
410
411  ```python
412  # shape of tensor 't' is [2, 2, 3]
413  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
414  tf.rank(t)  # 3
415  ```
416
417  **Note**: The rank of a tensor is not the same as the rank of a matrix. The
418  rank of a tensor is the number of indices required to uniquely select each
419  element of the tensor. Rank is also known as "order", "degree", or "ndims."
420
421  Args:
422    input: A `Tensor` or `SparseTensor`.
423    name: A name for the operation (optional).
424
425  Returns:
426    A `Tensor` of type `int32`.
427
428  @compatibility(numpy)
429  Equivalent to np.ndim
430  @end_compatibility
431  """
432  return rank_internal(input, name, optimize=True)
433
434
435def rank_internal(input, name=None, optimize=True):
436  # pylint: disable=redefined-builtin
437  """Returns the rank of a tensor.
438
439  Args:
440    input: A `Tensor` or `SparseTensor`.
441    name: A name for the operation (optional).
442    optimize: if true, encode the rank as a constant when possible.
443
444  Returns:
445    A `Tensor` of type `int32`.
446  """
447  with ops.name_scope(name, "Rank", [input]) as name:
448    if isinstance(input, (sparse_tensor.SparseTensor,
449                          sparse_tensor.SparseTensorValue)):
450      return gen_array_ops.size(input.dense_shape, name=name)
451    else:
452      input_tensor = ops.convert_to_tensor(input)
453      input_shape = input_tensor.get_shape()
454      if optimize and input_shape.ndims is not None:
455        return constant(input_shape.ndims, dtypes.int32, name=name)
456      return gen_array_ops.rank(input, name=name)
457
458
459def _slice_helper(tensor, slice_spec, var=None):
460  """Overload for Tensor.__getitem__.
461
462  This operation extracts the specified region from the tensor.
463  The notation is similar to NumPy with the restriction that
464  currently only support basic indexing. That means that
465  using a non-scalar tensor as input is not currently allowed.
466
467  Some useful examples:
468
469  ```python
470  # strip leading and trailing 2 elements
471  foo = tf.constant([1,2,3,4,5,6])
472  print(foo[2:-2].eval())  # => [3,4]
473
474  # skip every row and reverse every column
475  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
476  print(foo[::2,::-1].eval())  # => [[3,2,1], [9,8,7]]
477
478  # Use scalar tensors as indices on both dimensions
479  print(foo[tf.constant(0), tf.constant(2)].eval())  # => 3
480
481  # Insert another dimension
482  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
483  print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
484  print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
485  print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
486  [[7],[8],[9]]]
487
488  # Ellipses (3 equivalent operations)
489  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
490  print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
491  print(foo[tf.newaxis, ...].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
492  print(foo[tf.newaxis].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
493  ```
494
495  Notes:
496    - `tf.newaxis` is `None` as in NumPy.
497    - An implicit ellipsis is placed at the end of the `slice_spec`
498    - NumPy advanced indexing is currently not supported.
499
500  Args:
501    tensor: An ops.Tensor object.
502    slice_spec: The arguments to Tensor.__getitem__.
503    var: In the case of variable slice assignment, the Variable
504      object to slice (i.e. tensor is the read-only view of this
505      variable).
506
507  Returns:
508    The appropriate slice of "tensor", based on "slice_spec".
509
510  Raises:
511    ValueError: If a slice range is negative size.
512    TypeError: If the slice indices aren't int, slice, or Ellipsis.
513  """
514
515  if not isinstance(slice_spec, (list, tuple)):
516    slice_spec = [slice_spec]
517
518  begin, end, strides = [], [], []
519  index = 0
520
521  new_axis_mask, shrink_axis_mask = 0, 0
522  begin_mask, end_mask = 0, 0
523  ellipsis_mask = 0
524  for s in slice_spec:
525    if isinstance(s, _BaseSlice):
526      # python doesn't always use None when constructing ranges
527      # for example a[:] gives slice(None,sys.maxsize,None)
528      # whereas a[::1] gives slice(None,None,None)
529      if s.start is not None and s.start is not sys.maxsize:
530        begin.append(s.start)
531      else:
532        begin.append(0)
533        begin_mask |= (1 << index)
534      if s.stop is not None and s.stop != sys.maxsize:
535        end.append(s.stop)
536      else:
537        end.append(0)
538        end_mask |= (1 << index)
539      if s.step is not None:
540        strides.append(s.step)
541      else:
542        strides.append(1)
543    elif s is Ellipsis:
544      begin.append(0)
545      end.append(0)
546      strides.append(1)
547      ellipsis_mask |= (1 << index)
548    elif s is newaxis:
549      begin.append(0)
550      end.append(0)
551      strides.append(1)
552      new_axis_mask |= (1 << index)
553    else:
554      begin.append(s)
555      end.append(s + 1)
556      strides.append(1)
557      shrink_axis_mask |= (1 << index)
558    index += 1
559
560  # stack possibly involves no tensors, so we must use op_scope correct graph.
561  with ops.name_scope(None, "strided_slice",
562                      [tensor] + begin + end + strides) as name:
563    if begin:
564      packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
565                                                  stack(strides))
566      if (packed_begin.dtype == dtypes.int64 or
567          packed_end.dtype == dtypes.int64 or
568          packed_strides.dtype == dtypes.int64):
569        if packed_begin.dtype != dtypes.int64:
570          packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
571        if packed_end.dtype != dtypes.int64:
572          packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
573        if packed_strides.dtype != dtypes.int64:
574          packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
575    else:
576      var_empty = constant([], dtype=dtypes.int32)
577      packed_begin = packed_end = packed_strides = var_empty
578    return strided_slice(
579        tensor,
580        packed_begin,
581        packed_end,
582        packed_strides,
583        begin_mask=begin_mask,
584        end_mask=end_mask,
585        shrink_axis_mask=shrink_axis_mask,
586        new_axis_mask=new_axis_mask,
587        ellipsis_mask=ellipsis_mask,
588        var=var,
589        name=name)
590
591
592# pylint: disable=undefined-variable,protected-access,redefined-outer-name
593@tf_export("slice")
594def slice(input_, begin, size, name=None):
595  # pylint: disable=redefined-builtin
596  """Extracts a slice from a tensor.
597
598  This operation extracts a slice of size `size` from a tensor `input` starting
599  at the location specified by `begin`. The slice `size` is represented as a
600  tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
601  of `input` that you want to slice. The starting location (`begin`) for the
602  slice is represented as an offset in each dimension of `input`. In other
603  words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
604  want to slice from.
605
606  Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
607  perform slices, as it allows you to write `foo[3:7, :-2]` instead of
608  `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
609
610  `begin` is zero-based; `size` is one-based. If `size[i]` is -1,
611  all remaining elements in dimension i are included in the
612  slice. In other words, this is equivalent to setting:
613
614  `size[i] = input.dim_size(i) - begin[i]`
615
616  This operation requires that:
617
618  `0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n]`
619
620  For example:
621
622  ```python
623  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
624                   [[3, 3, 3], [4, 4, 4]],
625                   [[5, 5, 5], [6, 6, 6]]])
626  tf.slice(t, [1, 0, 0], [1, 1, 3])  # [[[3, 3, 3]]]
627  tf.slice(t, [1, 0, 0], [1, 2, 3])  # [[[3, 3, 3],
628                                     #   [4, 4, 4]]]
629  tf.slice(t, [1, 0, 0], [2, 1, 3])  # [[[3, 3, 3]],
630                                     #  [[5, 5, 5]]]
631  ```
632
633  Args:
634    input_: A `Tensor`.
635    begin: An `int32` or `int64` `Tensor`.
636    size: An `int32` or `int64` `Tensor`.
637    name: A name for the operation (optional).
638
639  Returns:
640    A `Tensor` the same type as `input`.
641  """
642  return gen_array_ops._slice(input_, begin, size, name=name)
643
644
645# pylint: disable=invalid-name
646@tf_export("strided_slice")
647def strided_slice(input_,
648                  begin,
649                  end,
650                  strides=None,
651                  begin_mask=0,
652                  end_mask=0,
653                  ellipsis_mask=0,
654                  new_axis_mask=0,
655                  shrink_axis_mask=0,
656                  var=None,
657                  name=None):
658  """Extracts a strided slice of a tensor (generalized python array indexing).
659
660  **Instead of calling this op directly most users will want to use the
661  NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
662  is supported via @{tf.Tensor.__getitem__} and @{tf.Variable.__getitem__}.**
663  The interface of this op is a low-level encoding of the slicing syntax.
664
665  Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
666  from the given `input_` tensor. Starting at the location specified by `begin`
667  the slice continues by adding `stride` to the index until all dimensions are
668  not less than `end`.
669  Note that a stride can be negative, which causes a reverse slice.
670
671  Given a Python slice `input[spec0, spec1, ..., specn]`,
672  this function will be called as follows.
673
674  `begin`, `end`, and `strides` will be vectors of length n.
675  n in general is not equal to the rank of the `input_` tensor.
676
677  In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
678  `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
679  the ith spec.
680
681  If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
682  the fullest possible range in that dimension is used instead.
683  `end_mask` works analogously, except with the end range.
684
685  `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
686  `foo[::-1]` reverses a tensor with shape 8.
687
688  If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
689  as needed will be inserted between other dimensions. Only one
690  non-zero bit is allowed in `ellipsis_mask`.
691
692  For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
693  equivalent to `foo[3:5,:,:,4:5]` and
694  `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
695
696  If the ith bit of `new_axis_mask` is set, then `begin`,
697  `end`, and `stride` are ignored and a new length 1 dimension is
698  added at this point in the output tensor.
699
700  For example,
701  `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
702
703  If the ith bit of `shrink_axis_mask` is set, it implies that the ith
704  specification shrinks the dimensionality by 1. `begin[i]`, `end[i]` and
705  `strides[i]` must imply a slice of size 1 in the dimension. For example in
706  Python one might do `foo[:, 3, :]` which would result in
707  `shrink_axis_mask` equal to 2.
708
709
710  NOTE: `begin` and `end` are zero-indexed.
711  `strides` entries must be non-zero.
712
713
714  ```python
715  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
716                   [[3, 3, 3], [4, 4, 4]],
717                   [[5, 5, 5], [6, 6, 6]]])
718  tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1])  # [[[3, 3, 3]]]
719  tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1])  # [[[3, 3, 3],
720                                                        #   [4, 4, 4]]]
721  tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1])  # [[[4, 4, 4],
722                                                           #   [3, 3, 3]]]
723  ```
724
725  Args:
726    input_: A `Tensor`.
727    begin: An `int32` or `int64` `Tensor`.
728    end: An `int32` or `int64` `Tensor`.
729    strides: An `int32` or `int64` `Tensor`.
730    begin_mask: An `int32` mask.
731    end_mask: An `int32` mask.
732    ellipsis_mask: An `int32` mask.
733    new_axis_mask: An `int32` mask.
734    shrink_axis_mask: An `int32` mask.
735    var: The variable corresponding to `input_` or None
736    name: A name for the operation (optional).
737
738  Returns:
739    A `Tensor` the same type as `input`.
740  """
741
742  if strides is None:
743    strides = ones_like(begin)
744
745  op = gen_array_ops.strided_slice(
746      input=input_,
747      begin=begin,
748      end=end,
749      strides=strides,
750      name=name,
751      begin_mask=begin_mask,
752      end_mask=end_mask,
753      ellipsis_mask=ellipsis_mask,
754      new_axis_mask=new_axis_mask,
755      shrink_axis_mask=shrink_axis_mask)
756
757  parent_name = name
758
759  def assign(val, name=None):
760    """Closure that holds all the arguments to create an assignment."""
761
762    if var is None:
763      raise ValueError("Sliced assignment is only supported for variables")
764
765    if name is None:
766      name = parent_name + "_assign"
767
768    return var._strided_slice_assign(
769        begin=begin,
770        end=end,
771        strides=strides,
772        value=val,
773        name=name,
774        begin_mask=begin_mask,
775        end_mask=end_mask,
776        ellipsis_mask=ellipsis_mask,
777        new_axis_mask=new_axis_mask,
778        shrink_axis_mask=shrink_axis_mask)
779
780  if context.in_graph_mode():
781    # TODO(apassos) In eager mode assignment will be done by overriding
782    # __setitem__ instead.
783    op.assign = assign
784  return op
785
786
787def _SliceHelperVar(var, slice_spec):
788  """Creates a slice helper object given a variable.
789
790  This allows creating a sub-tensor from part of the current contents
791  of a variable.  See ${tf.Tensor$`Tensor.__getitem__`}
792  for detailed examples of slicing.
793
794  This function in addition also allows assignment to a sliced range.
795  This is similar to `__setitem__` functionality in Python. However,
796  the syntax is different so that the user can capture the assignment
797  operation for grouping or passing to `sess.run()`.
798  For example,
799
800  ```python
801  import tensorflow as tf
802  A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
803  with tf.Session() as sess:
804    sess.run(tf.global_variables_initializer())
805    print(sess.run(A[:2, :2]))  # => [[1,2], [4,5]]
806
807    op = A[:2,:2].assign(22. * tf.ones((2, 2)))
808    print(sess.run(op))  # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
809  ```
810
811  Note that assignments currently do not support NumPy broadcasting
812  semantics.
813
814  Args:
815    var: An `ops.Variable` object.
816    slice_spec: The arguments to `Tensor.__getitem__`.
817
818  Returns:
819    The appropriate slice of "tensor", based on "slice_spec".
820    As an operator. The operator also has a `assign()` method
821    that can be used to generate an assignment operator.
822
823  Raises:
824    ValueError: If a slice range is negative size.
825    TypeError: If the slice indices aren't int, slice, or Ellipsis.
826
827  """
828
829  return _slice_helper(var._AsTensor(), slice_spec, var)
830
831
832ops.Tensor._override_operator("__getitem__", _slice_helper)
833
834
835@tf_export("parallel_stack")
836def parallel_stack(values, name="parallel_stack"):
837  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
838
839  Requires that the shape of inputs be known at graph construction time.
840
841  Packs the list of tensors in `values` into a tensor with rank one higher than
842  each tensor in `values`, by packing them along the first dimension.
843  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
844  tensor will have the shape `(N, A, B, C)`.
845
846  For example:
847
848  ```python
849  x = tf.constant([1, 4])
850  y = tf.constant([2, 5])
851  z = tf.constant([3, 6])
852  tf.parallel_stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]
853  ```
854
855  The difference between `stack` and `parallel_stack` is that `stack` requires
856  all the inputs be computed before the operation will begin but doesn't require
857  that the input shapes be known during graph construction.
858
859  `parallel_stack` will copy pieces of the input into the output as they become
860  available, in some situations this can provide a performance benefit.
861
862  Unlike `stack`, `parallel_stack` does NOT support backpropagation.
863
864  This is the opposite of unstack.  The numpy equivalent is
865
866      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
867
868  Args:
869    values: A list of `Tensor` objects with the same shape and type.
870    name: A name for this operation (optional).
871
872  Returns:
873    output: A stacked `Tensor` with the same type as `values`.
874  """
875  with ops.name_scope(name):
876    value_t = ops.convert_to_tensor(values[0])
877    value_shape = ops.convert_to_tensor(value_t).get_shape()
878
879    output_shape = tensor_shape.TensorShape([len(values)])
880    output_shape = output_shape.concatenate(value_shape)
881    # expand_dims converts concat to stack.
882    return gen_array_ops._parallel_concat(
883        [expand_dims(value, 0) for value in values], shape=output_shape)
884
885
886@tf_export("stack")
887def stack(values, axis=0, name="stack"):
888  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
889
890  Packs the list of tensors in `values` into a tensor with rank one higher than
891  each tensor in `values`, by packing them along the `axis` dimension.
892  Given a list of length `N` of tensors of shape `(A, B, C)`;
893
894  if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
895  if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
896  Etc.
897
898  For example:
899
900  ```python
901  x = tf.constant([1, 4])
902  y = tf.constant([2, 5])
903  z = tf.constant([3, 6])
904  tf.stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
905  tf.stack([x, y, z], axis=1)  # [[1, 2, 3], [4, 5, 6]]
906  ```
907
908  This is the opposite of unstack.  The numpy equivalent is
909
910  ```python
911  tf.stack([x, y, z]) = np.stack([x, y, z])
912  ```
913
914  Args:
915    values: A list of `Tensor` objects with the same shape and type.
916    axis: An `int`. The axis to stack along. Defaults to the first dimension.
917      Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
918    name: A name for this operation (optional).
919
920  Returns:
921    output: A stacked `Tensor` with the same type as `values`.
922
923  Raises:
924    ValueError: If `axis` is out of the range [-(R+1), R+1).
925  """
926  if axis == 0:
927    try:
928      # If the input is a constant list, it can be converted to a constant op
929      return ops.convert_to_tensor(values, name=name)
930    except (TypeError, ValueError):
931      pass  # Input list contains non-constant tensors
932
933  value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
934  if value_shape.ndims is not None:
935    expanded_num_dims = value_shape.ndims + 1
936    if axis < -expanded_num_dims or axis >= expanded_num_dims:
937      raise ValueError("axis = %d not in [%d, %d)" % (axis, -expanded_num_dims,
938                                                      expanded_num_dims))
939
940  return gen_array_ops._pack(values, axis=axis, name=name)
941
942
943# pylint: disable=invalid-name
944def _autopacking_helper(list_or_tuple, dtype, name):
945  """Converts the given list or tuple to a tensor by packing.
946
947  Args:
948    list_or_tuple: A (possibly nested) list or tuple containing a tensor.
949    dtype: The element type of the returned tensor.
950    name: A name for the returned tensor.
951
952  Returns:
953    A `tf.Tensor` with value equivalent to `list_or_tuple`.
954  """
955  must_pack = False
956  converted_elems = []
957  with ops.name_scope(name) as scope:
958    for i, elem in enumerate(list_or_tuple):
959      if ops.is_dense_tensor_like(elem):
960        if dtype is not None and elem.dtype.base_dtype != dtype:
961          raise TypeError("Cannot convert a list containing a tensor of dtype "
962                          "%s to %s (Tensor is: %r)" % (elem.dtype, dtype,
963                                                        elem))
964        converted_elems.append(elem)
965        must_pack = True
966      elif isinstance(elem, (list, tuple)):
967        converted_elem = _autopacking_helper(elem, dtype, str(i))
968        if ops.is_dense_tensor_like(converted_elem):
969          must_pack = True
970        converted_elems.append(converted_elem)
971      else:
972        converted_elems.append(elem)
973    if must_pack:
974      elems_as_tensors = []
975      for i, elem in enumerate(converted_elems):
976        if ops.is_dense_tensor_like(elem):
977          elems_as_tensors.append(elem)
978        else:
979          # NOTE(mrry): This is inefficient, but it enables us to
980          # handle the case where the list arguments are other
981          # convertible-to-tensor types, such as numpy arrays.
982          elems_as_tensors.append(
983              constant_op.constant(elem, dtype=dtype, name=str(i)))
984      return gen_array_ops._pack(elems_as_tensors, name=scope)
985    else:
986      return converted_elems
987
988
989def _get_dtype_from_nested_lists(list_or_tuple):
990  """Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
991
992  Args:
993    list_or_tuple: A list or tuple representing an object that can be
994      converted to a `tf.Tensor`.
995
996  Returns:
997    The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
998    such object exists.
999  """
1000  for elem in list_or_tuple:
1001    if ops.is_dense_tensor_like(elem):
1002      return elem.dtype.base_dtype
1003    elif isinstance(elem, (list, tuple)):
1004      maybe_dtype = _get_dtype_from_nested_lists(elem)
1005      if maybe_dtype is not None:
1006        return maybe_dtype
1007  return None
1008
1009
1010def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
1011  """Tensor conversion function that automatically packs arguments."""
1012  if as_ref:
1013    return NotImplemented
1014  inferred_dtype = _get_dtype_from_nested_lists(v)
1015  if inferred_dtype is None:
1016    # We did not find any tensor-like objects in the nested lists, so defer to
1017    # other conversion functions.
1018    return NotImplemented
1019  if dtype is not None and dtype != inferred_dtype:
1020    return NotImplemented
1021  return _autopacking_helper(v, inferred_dtype, name or "packed")
1022
1023
1024# pylint: enable=invalid-name
1025
1026# NOTE: Register this conversion function to run *before* one that
1027# assumes every element is a value.
1028ops.register_tensor_conversion_function((list, tuple),
1029                                        _autopacking_conversion_function, 99)
1030
1031
1032@tf_export("unstack")
1033def unstack(value, num=None, axis=0, name="unstack"):
1034  """Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
1035
1036  Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
1037  If `num` is not specified (the default), it is inferred from `value`'s shape.
1038  If `value.shape[axis]` is not known, `ValueError` is raised.
1039
1040  For example, given a tensor of shape `(A, B, C, D)`;
1041
1042  If `axis == 0` then the i'th tensor in `output` is the slice
1043    `value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
1044    (Note that the dimension unpacked along is gone, unlike `split`).
1045
1046  If `axis == 1` then the i'th tensor in `output` is the slice
1047    `value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
1048  Etc.
1049
1050  This is the opposite of stack.  The numpy equivalent is
1051
1052      tf.unstack(x, n) = np.unstack(x)
1053
1054  Args:
1055    value: A rank `R > 0` `Tensor` to be unstacked.
1056    num: An `int`. The length of the dimension `axis`. Automatically inferred
1057      if `None` (the default).
1058    axis: An `int`. The axis to unstack along. Defaults to the first
1059      dimension. Negative values wrap around, so the valid range is `[-R, R)`.
1060    name: A name for the operation (optional).
1061
1062  Returns:
1063    The list of `Tensor` objects unstacked from `value`.
1064
1065  Raises:
1066    ValueError: If `num` is unspecified and cannot be inferred.
1067    ValueError: If `axis` is out of the range [-R, R).
1068  """
1069  if num is None:
1070    value = ops.convert_to_tensor(value)
1071    value_shape = value.get_shape()
1072    if value_shape.ndims is not None:
1073      if axis < -value_shape.ndims or axis >= value_shape.ndims:
1074        raise ValueError("axis = %d not in [%d, %d)" %
1075                         (axis, -value_shape.ndims, value_shape.ndims))
1076      num = value_shape[axis].value
1077  if num is None:
1078    raise ValueError("Cannot infer num from shape %s" % value_shape)
1079  return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
1080
1081
1082@tf_export("concat")
1083def concat(values, axis, name="concat"):
1084  """Concatenates tensors along one dimension.
1085
1086  Concatenates the list of tensors `values` along dimension `axis`.  If
1087  `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
1088  result has shape
1089
1090      [D0, D1, ... Raxis, ...Dn]
1091
1092  where
1093
1094      Raxis = sum(Daxis(i))
1095
1096  That is, the data from the input tensors is joined along the `axis`
1097  dimension.
1098
1099  The number of dimensions of the input tensors must match, and all dimensions
1100  except `axis` must be equal.
1101
1102  For example:
1103
1104  ```python
1105  t1 = [[1, 2, 3], [4, 5, 6]]
1106  t2 = [[7, 8, 9], [10, 11, 12]]
1107  tf.concat([t1, t2], 0)  # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
1108  tf.concat([t1, t2], 1)  # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
1109
1110  # tensor t3 with shape [2, 3]
1111  # tensor t4 with shape [2, 3]
1112  tf.shape(tf.concat([t3, t4], 0))  # [4, 3]
1113  tf.shape(tf.concat([t3, t4], 1))  # [2, 6]
1114  ```
1115  As in Python, the `axis` could also be negative numbers. Negative `axis`
1116  are interpreted as counting from the end of the rank, i.e.,
1117   `axis + rank(values)`-th dimension.
1118
1119  For example:
1120
1121  ```python
1122  t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
1123  t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
1124  tf.concat([t1, t2], -1)
1125  ```
1126
1127  would produce:
1128
1129  ```python
1130  [[[ 1,  2,  7,  4],
1131    [ 2,  3,  8,  4]],
1132
1133   [[ 4,  4,  2, 10],
1134    [ 5,  3, 15, 11]]]
1135  ```
1136
1137  Note: If you are concatenating along a new axis consider using stack.
1138  E.g.
1139
1140  ```python
1141  tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
1142  ```
1143
1144  can be rewritten as
1145
1146  ```python
1147  tf.stack(tensors, axis=axis)
1148  ```
1149
1150  Args:
1151    values: A list of `Tensor` objects or a single `Tensor`.
1152    axis: 0-D `int32` `Tensor`.  Dimension along which to concatenate. Must be
1153      in the range `[-rank(values), rank(values))`. As in Python, indexing
1154      for axis is 0-based. Positive axis in the rage of
1155      `[0, rank(values))` refers to `axis`-th dimension. And negative axis
1156      refers to `axis + rank(values)`-th dimension.
1157    name: A name for the operation (optional).
1158
1159  Returns:
1160    A `Tensor` resulting from concatenation of the input tensors.
1161  """
1162  if not isinstance(values, (list, tuple)):
1163    values = [values]
1164  # TODO(mrry): Change to return values?
1165  if len(values) == 1:  # Degenerate case of one tensor.
1166    # Make a throwaway call to convert_to_tensor to make sure
1167    # that axis is of the correct type, and make sure that
1168    # the returned tensor is a scalar.
1169    # TODO(keveman): Implement a standalone type and shape checker.
1170    with ops.name_scope(name) as scope:
1171      ops.convert_to_tensor(
1172          axis, name="concat_dim",
1173          dtype=dtypes.int32).get_shape().assert_is_compatible_with(
1174              tensor_shape.scalar())
1175      return identity(values[0], name=scope)
1176  return gen_array_ops._concat_v2(values=values, axis=axis, name=name)
1177
1178
1179@tf_export("boolean_mask")
1180def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
1181  """Apply boolean mask to tensor.  Numpy equivalent is `tensor[mask]`.
1182
1183  ```python
1184  # 1-D example
1185  tensor = [0, 1, 2, 3]
1186  mask = np.array([True, False, True, False])
1187  boolean_mask(tensor, mask)  # [0, 2]
1188  ```
1189
1190  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
1191  the first K dimensions of `tensor`'s shape.  We then have:
1192    `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
1193  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
1194  The `axis` could be used with `mask` to indicate the axis to mask from.
1195  In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
1196  the first `axis + dim(mask)` dimensions of `tensor`'s shape.
1197
1198  Args:
1199    tensor:  N-D tensor.
1200    mask:  K-D boolean tensor, K <= N and K must be known statically.
1201    name:  A name for this operation (optional).
1202    axis:  A 0-D int Tensor representing the axis in `tensor` to mask from.
1203      By default, axis is 0 which will mask from the first dimension. Otherwise
1204      K + axis <= N.
1205
1206  Returns:
1207    (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
1208    to `True` values in `mask`.
1209
1210  Raises:
1211    ValueError:  If shapes do not conform.
1212
1213  Examples:
1214
1215  ```python
1216  # 2-D example
1217  tensor = [[1, 2], [3, 4], [5, 6]]
1218  mask = np.array([True, False, True])
1219  boolean_mask(tensor, mask)  # [[1, 2], [5, 6]]
1220  ```
1221  """
1222
1223  def _apply_mask_1d(reshaped_tensor, mask, axis=None):
1224    """Mask tensor along dimension 0 with a 1-D mask."""
1225    indices = squeeze(where(mask), squeeze_dims=[1])
1226    return gather(reshaped_tensor, indices, axis=axis)
1227
1228  with ops.name_scope(name, values=[tensor, mask]):
1229    tensor = ops.convert_to_tensor(tensor, name="tensor")
1230    mask = ops.convert_to_tensor(mask, name="mask")
1231
1232    shape_mask = mask.get_shape()
1233    ndims_mask = shape_mask.ndims
1234    shape_tensor = tensor.get_shape()
1235    if ndims_mask == 0:
1236      raise ValueError("mask cannot be scalar.")
1237    if ndims_mask is None:
1238      raise ValueError(
1239          "Number of mask dimensions must be specified, even if some dimensions"
1240          " are None.  E.g. shape=[None] is ok, but shape=None is not.")
1241    axis = 0 if axis is None else axis
1242    shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
1243
1244    leading_size = gen_math_ops._prod(
1245        shape(tensor)[axis:axis + ndims_mask], [0])
1246    tensor = reshape(tensor,
1247                     concat([
1248                         shape(tensor)[:axis], [leading_size],
1249                         shape(tensor)[axis + ndims_mask:]
1250                     ], 0))
1251    first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
1252    tensor.set_shape(
1253        tensor_shape.as_shape(shape_tensor[:axis]).concatenate([first_dim])
1254        .concatenate(shape_tensor[axis + ndims_mask:]))
1255
1256    mask = reshape(mask, [-1])
1257    return _apply_mask_1d(tensor, mask, axis)
1258
1259
1260@tf_export("sparse_mask")
1261def sparse_mask(a, mask_indices, name=None):
1262  """Masks elements of `IndexedSlices`.
1263
1264  Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
1265  contains a subset of the slices of `a`. Only the slices at indices not
1266  specified in `mask_indices` are returned.
1267
1268  This is useful when you need to extract a subset of slices in an
1269  `IndexedSlices` object.
1270
1271  For example:
1272
1273  ```python
1274  # `a` contains slices at indices [12, 26, 37, 45] from a large tensor
1275  # with shape [1000, 10]
1276  a.indices  # [12, 26, 37, 45]
1277  tf.shape(a.values)  # [4, 10]
1278
1279  # `b` will be the subset of `a` slices at its second and third indices, so
1280  # we want to mask its first and last indices (which are at absolute
1281  # indices 12, 45)
1282  b = tf.sparse_mask(a, [12, 45])
1283
1284  b.indices  # [26, 37]
1285  tf.shape(b.values)  # [2, 10]
1286  ```
1287
1288  Args:
1289    a: An `IndexedSlices` instance.
1290    mask_indices: Indices of elements to mask.
1291    name: A name for the operation (optional).
1292
1293  Returns:
1294    The masked `IndexedSlices` instance.
1295  """
1296  with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
1297    indices = a.indices
1298    out_indices, to_gather = setdiff1d(indices, mask_indices)
1299    out_values = gather(a.values, to_gather, name=name)
1300    return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
1301
1302
1303@tf_export("unique")
1304def unique(x, out_idx=dtypes.int32, name=None):
1305  # TODO(yongtang): switch to v2 once API deprecation
1306  # period (3 weeks) pass.
1307  # TODO(yongtang): The documentation should also
1308  # be updated when switch  to v2.
1309  return gen_array_ops._unique(x, out_idx, name)
1310
1311
1312unique.__doc__ = gen_array_ops._unique.__doc__
1313
1314
1315@tf_export("split")
1316def split(value, num_or_size_splits, axis=0, num=None, name="split"):
1317  """Splits a tensor into sub tensors.
1318
1319  If `num_or_size_splits` is an integer type, `num_split`, then splits `value`
1320  along dimension `axis` into `num_split` smaller tensors.
1321  Requires that `num_split` evenly divides `value.shape[axis]`.
1322
1323  If `num_or_size_splits` is not an integer type, it is presumed to be a Tensor
1324  `size_splits`, then splits `value` into `len(size_splits)` pieces. The shape
1325  of the `i`-th piece has the same size as the `value` except along dimension
1326  `axis` where the size is `size_splits[i]`.
1327
1328  For example:
1329
1330  ```python
1331  # 'value' is a tensor with shape [5, 30]
1332  # Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
1333  split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
1334  tf.shape(split0)  # [5, 4]
1335  tf.shape(split1)  # [5, 15]
1336  tf.shape(split2)  # [5, 11]
1337  # Split 'value' into 3 tensors along dimension 1
1338  split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
1339  tf.shape(split0)  # [5, 10]
1340  ```
1341
1342  Args:
1343    value: The `Tensor` to split.
1344    num_or_size_splits: Either a 0-D integer `Tensor` indicating the number of
1345      splits along split_dim or a 1-D integer `Tensor` containing
1346      the sizes of each output tensor along split_dim. If a scalar then it must
1347      evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
1348      split dimension must match that of the `value`.
1349    axis: A 0-D `int32` `Tensor`. The dimension along which to split.
1350      Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
1351    num: Optional, used to specify the number of outputs when it cannot be
1352      inferred from the shape of `size_splits`.
1353    name: A name for the operation (optional).
1354
1355  Returns:
1356    if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
1357    objects; if `num_or_size_splits` is a 1-D Tensor returns
1358    `num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
1359    `value`.
1360
1361  Raises:
1362    ValueError: If `num` is unspecified and cannot be inferred.
1363  """
1364  size_splits = ops.convert_to_tensor(num_or_size_splits)
1365  if size_splits._rank() == 0 and size_splits.dtype.is_integer:
1366    return gen_array_ops._split(
1367        axis=axis, num_split=num_or_size_splits, value=value, name=name)
1368
1369  if num is None:
1370    num = size_splits._shape_tuple()[0]
1371    if num is None:
1372      raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
1373
1374  return gen_array_ops._split_v(
1375      value=value,
1376      size_splits=size_splits,
1377      axis=axis,
1378      num_split=num,
1379      name=name)
1380
1381
1382@tf_export("transpose")
1383def transpose(a, perm=None, name="transpose", conjugate=False):
1384  """Transposes `a`. Permutes the dimensions according to `perm`.
1385
1386  The returned tensor's dimension i will correspond to the input dimension
1387  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
1388  the rank of the input tensor. Hence by default, this operation performs a
1389  regular matrix transpose on 2-D input Tensors. If conjugate is True and
1390  `a.dtype` is either `complex64` or `complex128` then the values of `a`
1391  are conjugated and transposed.
1392
1393  For example:
1394
1395  ```python
1396  x = tf.constant([[1, 2, 3], [4, 5, 6]])
1397  tf.transpose(x)  # [[1, 4]
1398                   #  [2, 5]
1399                   #  [3, 6]]
1400
1401  # Equivalently
1402  tf.transpose(x, perm=[1, 0])  # [[1, 4]
1403                                #  [2, 5]
1404                                #  [3, 6]]
1405
1406  # If x is complex, setting conjugate=True gives the conjugate transpose
1407  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
1408                   [4 + 4j, 5 + 5j, 6 + 6j]])
1409  tf.transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
1410                                   #  [2 - 2j, 5 - 5j],
1411                                   #  [3 - 3j, 6 - 6j]]
1412
1413  # 'perm' is more useful for n-dimensional tensors, for n > 2
1414  x = tf.constant([[[ 1,  2,  3],
1415                    [ 4,  5,  6]],
1416                   [[ 7,  8,  9],
1417                    [10, 11, 12]]])
1418
1419  # Take the transpose of the matrices in dimension-0
1420  # (this common operation has a shorthand `matrix_transpose`)
1421  tf.transpose(x, perm=[0, 2, 1])  # [[[1,  4],
1422                                   #   [2,  5],
1423                                   #   [3,  6]],
1424                                   #  [[7, 10],
1425                                   #   [8, 11],
1426                                   #   [9, 12]]]
1427  ```
1428
1429  Args:
1430    a: A `Tensor`.
1431    perm: A permutation of the dimensions of `a`.
1432    name: A name for the operation (optional).
1433    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
1434      to tf.conj(tf.transpose(input)).
1435
1436  Returns:
1437    A transposed `Tensor`.
1438  """
1439  with ops.name_scope(name, "transpose", [a]) as name:
1440    transpose_fn = (
1441        gen_array_ops._conjugate_transpose
1442        if (conjugate and a.dtype.is_complex) else gen_array_ops.transpose)
1443    if perm is None:
1444      rank = gen_array_ops.rank(a)
1445      perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
1446      ret = transpose_fn(a, perm, name=name)
1447      # NOTE(mrry): Setting the shape explicitly because
1448      #   reverse is not handled by the shape function.
1449      if context.in_graph_mode():
1450        input_shape = ret.op.inputs[0].get_shape().dims
1451        if input_shape is not None:
1452          ret.set_shape(input_shape[::-1])
1453    else:
1454      ret = transpose_fn(a, perm, name=name)
1455    return ret
1456
1457
1458# pylint: disable=invalid-name
1459@tf_export("matrix_transpose", "linalg.transpose")
1460def matrix_transpose(a, name="matrix_transpose", conjugate=False):
1461  """Transposes last two dimensions of tensor `a`.
1462
1463  For example:
1464
1465  ```python
1466  x = tf.constant([[1, 2, 3], [4, 5, 6]])
1467  tf.matrix_transpose(x)  # [[1, 4],
1468                          #  [2, 5],
1469                          #  [3, 6]]
1470
1471  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
1472                   [4 + 4j, 5 + 5j, 6 + 6j]])
1473  tf.matrix_transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
1474                                          #  [2 - 2j, 5 - 5j],
1475                                          #  [3 - 3j, 6 - 6j]]
1476
1477  # Matrix with two batch dimensions.
1478  # x.shape is [1, 2, 3, 4]
1479  # tf.matrix_transpose(x) is shape [1, 2, 4, 3]
1480  ```
1481
1482  Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
1483  This is done with minimal cost, and is preferable to using this function. E.g.
1484
1485  ```python
1486  # Good!  Transpose is taken at minimal additional cost.
1487  tf.matmul(matrix, b, transpose_b=True)
1488
1489  # Inefficient!
1490  tf.matmul(matrix, tf.matrix_transpose(b))
1491  ```
1492
1493  Args:
1494    a: A `Tensor` with `rank >= 2`.
1495    name: A name for the operation (optional).
1496    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
1497      to tf.conj(tf.matrix_transpose(input)).
1498
1499  Returns:
1500    A transposed batch matrix `Tensor`.
1501
1502  Raises:
1503    ValueError:  If `a` is determined statically to have `rank < 2`.
1504  """
1505  with ops.name_scope(name, values=[a]):
1506    a = ops.convert_to_tensor(a, name="a")
1507
1508    # If we know the number of dimensions (statically), we can do two things:
1509    # 1. Check that `a` is a (batch) matrix.
1510    # 2. Use a python list for perm.  This preserves static shape information
1511    #    and avoids extra computations.
1512    a_shape = a.get_shape()
1513    ndims = a_shape.ndims
1514    if ndims is not None:
1515      if ndims < 2:
1516        raise ValueError(
1517            "Argument 'a' should be a (batch) matrix, with rank >= 2.  Found: "
1518            "%s" % a_shape)
1519      perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
1520    else:
1521      a_rank = rank(a)
1522      perm = concat((gen_math_ops._range(0, a_rank - 2, 1),
1523                     [a_rank - 1, a_rank - 2]), 0)
1524
1525    return transpose(a, perm=perm, conjugate=conjugate)
1526
1527
1528# pylint: enable=invalid-name
1529
1530
1531@tf_export("zeros")
1532def zeros(shape, dtype=dtypes.float32, name=None):
1533  """Creates a tensor with all elements set to zero.
1534
1535  This operation returns a tensor of type `dtype` with shape `shape` and
1536  all elements set to zero.
1537
1538  For example:
1539
1540  ```python
1541  tf.zeros([3, 4], tf.int32)  # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
1542  ```
1543
1544  Args:
1545    shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
1546      `int32`.
1547    dtype: The type of an element in the resulting `Tensor`.
1548    name: A name for the operation (optional).
1549
1550  Returns:
1551    A `Tensor` with all elements set to zero.
1552  """
1553  dtype = dtypes.as_dtype(dtype).base_dtype
1554  with ops.name_scope(name, "zeros", [shape]) as name:
1555    if dtype == dtypes.bool:
1556      zero = False
1557    elif dtype == dtypes.string:
1558      zero = ""
1559    else:
1560      zero = 0
1561    if not isinstance(shape, ops.Tensor):
1562      try:
1563        # Go through tensor shapes to get int64-if-needed semantics
1564        shape = constant_op._tensor_shape_tensor_conversion_function(
1565            tensor_shape.TensorShape(shape))
1566      except (TypeError, ValueError):
1567        # Happens when shape is a list with tensor elements
1568        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
1569    if not shape._shape_tuple():
1570      shape = reshape(shape, [-1])  # Ensure it's a vector
1571    output = fill(shape, constant(zero, dtype=dtype), name=name)
1572  assert output.dtype.base_dtype == dtype
1573  return output
1574
1575
1576@tf_export("zeros_like")
1577def zeros_like(tensor, dtype=None, name=None, optimize=True):
1578  """Creates a tensor with all elements set to zero.
1579
1580  Given a single tensor (`tensor`), this operation returns a tensor of the
1581  same type and shape as `tensor` with all elements set to zero. Optionally,
1582  you can use `dtype` to specify a new type for the returned tensor.
1583
1584  For example:
1585
1586  ```python
1587  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
1588  tf.zeros_like(tensor)  # [[0, 0, 0], [0, 0, 0]]
1589  ```
1590
1591  Args:
1592    tensor: A `Tensor`.
1593    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
1594      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
1595      `complex64`, `complex128`, `bool` or `string`.
1596    name: A name for the operation (optional).
1597    optimize: if true, attempt to statically determine the shape of 'tensor'
1598    and encode it as a constant.
1599
1600  Returns:
1601    A `Tensor` with all elements set to zero.
1602  """
1603  with ops.name_scope(name, "zeros_like", [tensor]) as name:
1604    tensor = ops.convert_to_tensor(tensor, name="tensor")
1605
1606    if context.in_eager_mode():
1607      if dtype is not None and dtype != tensor.dtype:
1608        return zeros(
1609            shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
1610      with ops.device(tensor.device):
1611        return gen_array_ops._zeros_like(tensor, name=name)
1612
1613    # For now, variant types must be created via zeros_like; as we need to
1614    # pass the input variant object to the proper zeros callback.
1615
1616    if (optimize and tensor.shape.is_fully_defined() and
1617        tensor.dtype != dtypes.variant):
1618      # We can produce a zeros tensor independent of the value of 'tensor',
1619      # since the shape is known statically.
1620      return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
1621
1622    if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
1623      return zeros(
1624          shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
1625    else:
1626      return gen_array_ops._zeros_like(tensor, name=name)
1627
1628
1629@tf_export("ones_like")
1630def ones_like(tensor, dtype=None, name=None, optimize=True):
1631  """Creates a tensor with all elements set to 1.
1632
1633  Given a single tensor (`tensor`), this operation returns a tensor of the same
1634  type and shape as `tensor` with all elements set to 1. Optionally, you can
1635  specify a new type (`dtype`) for the returned tensor.
1636
1637  For example:
1638
1639  ```python
1640  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
1641  tf.ones_like(tensor)  # [[1, 1, 1], [1, 1, 1]]
1642  ```
1643
1644  Args:
1645    tensor: A `Tensor`.
1646    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
1647      `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`,
1648      `complex64`, `complex128` or `bool`.
1649    name: A name for the operation (optional).
1650    optimize: if true, attempt to statically determine the shape of 'tensor'
1651    and encode it as a constant.
1652
1653  Returns:
1654    A `Tensor` with all elements set to 1.
1655  """
1656  with ops.name_scope(name, "ones_like", [tensor]) as name:
1657    tensor = ops.convert_to_tensor(tensor, name="tensor")
1658    ones_shape = shape_internal(tensor, optimize=optimize)
1659    if dtype is None:
1660      dtype = tensor.dtype
1661    ret = ones(ones_shape, dtype=dtype, name=name)
1662    if context.in_graph_mode():
1663      ret.set_shape(tensor.get_shape())
1664    return ret
1665
1666
1667@tf_export("ones")
1668def ones(shape, dtype=dtypes.float32, name=None):
1669  """Creates a tensor with all elements set to 1.
1670
1671  This operation returns a tensor of type `dtype` with shape `shape` and all
1672  elements set to 1.
1673
1674  For example:
1675
1676  ```python
1677  tf.ones([2, 3], tf.int32)  # [[1, 1, 1], [1, 1, 1]]
1678  ```
1679
1680  Args:
1681    shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
1682      `int32`.
1683    dtype: The type of an element in the resulting `Tensor`.
1684    name: A name for the operation (optional).
1685
1686  Returns:
1687    A `Tensor` with all elements set to 1.
1688  """
1689  dtype = dtypes.as_dtype(dtype).base_dtype
1690  with ops.name_scope(name, "ones", [shape]) as name:
1691    one = True if dtype == dtypes.bool else 1
1692    if not isinstance(shape, ops.Tensor):
1693      try:
1694        # Go through tensor shapes to get int64-if-needed semantics
1695        shape = constant_op._tensor_shape_tensor_conversion_function(
1696            tensor_shape.TensorShape(shape))
1697      except (TypeError, ValueError):
1698        # Happens when shape is a list with tensor elements
1699        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
1700    if not shape._shape_tuple():
1701      shape = reshape(shape, [-1])  # Ensure it's a vector
1702    output = fill(shape, constant(one, dtype=dtype), name=name)
1703  assert output.dtype.base_dtype == dtype
1704  return output
1705
1706
1707@tf_export("placeholder")
1708def placeholder(dtype, shape=None, name=None):
1709  """Inserts a placeholder for a tensor that will be always fed.
1710
1711  **Important**: This tensor will produce an error if evaluated. Its value must
1712  be fed using the `feed_dict` optional argument to `Session.run()`,
1713  `Tensor.eval()`, or `Operation.run()`.
1714
1715  For example:
1716
1717  ```python
1718  x = tf.placeholder(tf.float32, shape=(1024, 1024))
1719  y = tf.matmul(x, x)
1720
1721  with tf.Session() as sess:
1722    print(sess.run(y))  # ERROR: will fail because x was not fed.
1723
1724    rand_array = np.random.rand(1024, 1024)
1725    print(sess.run(y, feed_dict={x: rand_array}))  # Will succeed.
1726  ```
1727
1728  @compatibility{eager} Placeholders are not compatible with eager execution.
1729
1730  Args:
1731    dtype: The type of elements in the tensor to be fed.
1732    shape: The shape of the tensor to be fed (optional). If the shape is not
1733      specified, you can feed a tensor of any shape.
1734    name: A name for the operation (optional).
1735
1736  Returns:
1737    A `Tensor` that may be used as a handle for feeding a value, but not
1738    evaluated directly.
1739
1740  Raises:
1741    RuntimeError: if eager execution is enabled
1742  """
1743  if context.in_eager_mode():
1744    raise RuntimeError("tf.placeholder() is not compatible with "
1745                       "eager execution.")
1746
1747  return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
1748
1749
1750# pylint: disable=redefined-outer-name
1751def _normalize_sparse_shape(shape, name):
1752  """Returns a tuple of (Tensor or None, rank or None)."""
1753  if shape is None:
1754    return (None, None)
1755  rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
1756  if not isinstance(shape, ops.Tensor) and None in shape:
1757    return (None, rank)
1758  return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
1759
1760
1761@tf_export("sparse_placeholder")
1762def sparse_placeholder(dtype, shape=None, name=None):
1763  """Inserts a placeholder for a sparse tensor that will be always fed.
1764
1765  **Important**: This sparse tensor will produce an error if evaluated.
1766  Its value must be fed using the `feed_dict` optional argument to
1767  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.
1768
1769  For example:
1770
1771  ```python
1772  x = tf.sparse_placeholder(tf.float32)
1773  y = tf.sparse_reduce_sum(x)
1774
1775  with tf.Session() as sess:
1776    print(sess.run(y))  # ERROR: will fail because x was not fed.
1777
1778    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
1779    values = np.array([1.0, 2.0], dtype=np.float32)
1780    shape = np.array([7, 9, 2], dtype=np.int64)
1781    print(sess.run(y, feed_dict={
1782      x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
1783    print(sess.run(y, feed_dict={
1784      x: (indices, values, shape)}))  # Will succeed.
1785
1786    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
1787    sp_value = sp.eval(session=sess)
1788    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
1789  ```
1790
1791  @compatibility{eager} Placeholders are not compatible with eager execution.
1792
1793  Args:
1794    dtype: The type of `values` elements in the tensor to be fed.
1795    shape: The shape of the tensor to be fed (optional). If the shape is not
1796      specified, you can feed a sparse tensor of any shape.
1797    name: A name for prefixing the operations (optional).
1798
1799  Returns:
1800    A `SparseTensor` that may be used as a handle for feeding a value, but not
1801    evaluated directly.
1802
1803  Raises:
1804    RuntimeError: if eager execution is enabled
1805  """
1806  if context.in_eager_mode():
1807    raise RuntimeError("tf.placeholder() is not compatible with "
1808                       "eager execution.")
1809
1810  shape_name = (name + "/shape") if name is not None else None
1811  shape, rank = _normalize_sparse_shape(shape, shape_name)
1812  if shape is None:
1813    shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
1814  return sparse_tensor.SparseTensor(
1815      values=placeholder(
1816          dtype,
1817          shape=[None],
1818          name=(name + "/values") if name is not None else None),
1819      indices=placeholder(
1820          dtypes.int64, shape=[None, rank],
1821          name=(name + "/indices") if name is not None else None),
1822      dense_shape=shape)
1823
1824
1825# pylint: enable=redefined-outer-name
1826
1827
1828@tf_export("pad")
1829def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0):  # pylint: disable=invalid-name
1830  """Pads a tensor.
1831
1832  This operation pads a `tensor` according to the `paddings` you specify.
1833  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
1834  `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
1835  many values to add before the contents of `tensor` in that dimension, and
1836  `paddings[D, 1]` indicates how many values to add after the contents of
1837  `tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
1838  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
1839  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
1840  no greater than `tensor.dim_size(D)`.
1841
1842  The padded size of each dimension D of the output is:
1843
1844  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
1845
1846  For example:
1847
1848  ```python
1849  t = tf.constant([[1, 2, 3], [4, 5, 6]])
1850  paddings = tf.constant([[1, 1,], [2, 2]])
1851  # 'constant_values' is 0.
1852  # rank of 't' is 2.
1853  tf.pad(t, paddings, "CONSTANT")  # [[0, 0, 0, 0, 0, 0, 0],
1854                                   #  [0, 0, 1, 2, 3, 0, 0],
1855                                   #  [0, 0, 4, 5, 6, 0, 0],
1856                                   #  [0, 0, 0, 0, 0, 0, 0]]
1857
1858  tf.pad(t, paddings, "REFLECT")  # [[6, 5, 4, 5, 6, 5, 4],
1859                                  #  [3, 2, 1, 2, 3, 2, 1],
1860                                  #  [6, 5, 4, 5, 6, 5, 4],
1861                                  #  [3, 2, 1, 2, 3, 2, 1]]
1862
1863  tf.pad(t, paddings, "SYMMETRIC")  # [[2, 1, 1, 2, 3, 3, 2],
1864                                    #  [2, 1, 1, 2, 3, 3, 2],
1865                                    #  [5, 4, 4, 5, 6, 6, 5],
1866                                    #  [5, 4, 4, 5, 6, 6, 5]]
1867  ```
1868
1869  Args:
1870    tensor: A `Tensor`.
1871    paddings: A `Tensor` of type `int32`.
1872    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
1873    name: A name for the operation (optional).
1874    constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
1875      same type as `tensor`.
1876
1877  Returns:
1878    A `Tensor`. Has the same type as `tensor`.
1879
1880  Raises:
1881    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
1882  """
1883
1884  # Convert lower/mixed case to upper for NumPy compatibility
1885  # NumPy uses all lower-case modes.
1886  mode = mode.upper()
1887  if mode == "CONSTANT":
1888    # TODO(rjryan): Once the forward compatibility period (3 weeks) have passed
1889    # remove the "Pad" fallback here.
1890    if constant_values != 0:
1891      result = gen_array_ops._pad_v2(
1892          tensor, paddings, constant_values, name=name)
1893    else:
1894      result = gen_array_ops._pad(tensor, paddings, name=name)
1895  elif mode == "REFLECT":
1896    result = gen_array_ops._mirror_pad(
1897        tensor, paddings, mode="REFLECT", name=name)
1898  elif mode == "SYMMETRIC":
1899    result = gen_array_ops._mirror_pad(
1900        tensor, paddings, mode="SYMMETRIC", name=name)
1901  else:
1902    raise ValueError("Unknown padding mode: %s" % mode)
1903
1904  # Restore shape information where possible.
1905  if context.in_graph_mode():
1906    paddings_constant = tensor_util.constant_value(
1907        result.op.inputs[1], partial=True)
1908    input_shape = result.op.inputs[0].shape
1909    if (input_shape.ndims is not None and not result.shape.is_fully_defined()
1910        and paddings_constant is not None):
1911      new_shape = []
1912      for padding, dim in zip(paddings_constant, input_shape.as_list()):
1913        if padding is None or dim is None or not all(padding):
1914          new_shape.append(None)
1915        else:
1916          new_shape.append(sum(padding) + dim)
1917      result.set_shape(new_shape)
1918
1919  return result
1920
1921
1922@tf_export("meshgrid")
1923def meshgrid(*args, **kwargs):
1924  """Broadcasts parameters for evaluation on an N-D grid.
1925
1926  Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
1927  of N-D coordinate arrays for evaluating expressions on an N-D grid.
1928
1929  Notes:
1930
1931  `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
1932  When the `indexing` argument is set to 'xy' (the default), the broadcasting
1933  instructions for the first two dimensions are swapped.
1934
1935  Examples:
1936
1937  Calling `X, Y = meshgrid(x, y)` with the tensors
1938
1939  ```python
1940  x = [1, 2, 3]
1941  y = [4, 5, 6]
1942  X, Y = tf.meshgrid(x, y)
1943  # X = [[1, 2, 3],
1944  #      [1, 2, 3],
1945  #      [1, 2, 3]]
1946  # Y = [[4, 4, 4],
1947  #      [5, 5, 5],
1948  #      [6, 6, 6]]
1949  ```
1950
1951  Args:
1952    *args: `Tensor`s with rank 1.
1953    **kwargs:
1954      - indexing: Either 'xy' or 'ij' (optional, default: 'xy').
1955      - name: A name for the operation (optional).
1956
1957  Returns:
1958    outputs: A list of N `Tensor`s with rank N.
1959
1960  Raises:
1961    TypeError: When no keyword arguments (kwargs) are passed.
1962    ValueError: When indexing keyword argument is not one of `xy` or `ij`.
1963  """
1964
1965  indexing = kwargs.pop("indexing", "xy")
1966  name = kwargs.pop("name", "meshgrid")
1967  if kwargs:
1968    key = list(kwargs.keys())[0]
1969    raise TypeError("'{}' is an invalid keyword argument "
1970                    "for this function".format(key))
1971
1972  if indexing not in ("xy", "ij"):
1973    raise ValueError("indexing parameter must be either 'xy' or 'ij'")
1974
1975  with ops.name_scope(name, "meshgrid", args) as name:
1976    ndim = len(args)
1977    s0 = (1,) * ndim
1978
1979    # Prepare reshape by inserting dimensions with size 1 where needed
1980    output = []
1981    for i, x in enumerate(args):
1982      output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
1983    # Create parameters for broadcasting each tensor to the full size
1984    shapes = [size(x) for x in args]
1985
1986    output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
1987
1988    if indexing == "xy" and ndim > 1:
1989      output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
1990      output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
1991      shapes[0], shapes[1] = shapes[1], shapes[0]
1992
1993    # TODO(nolivia): improve performance with a broadcast
1994    mult_fact = ones(shapes, output_dtype)
1995    return [x * mult_fact for x in output]
1996
1997
1998NEW_AXIS = -1
1999SHRINK_AXIS = -2
2000
2001
2002# PEP-8 naming
2003# pylint: disable=invalid-name,redefined-outer-name
2004def _compute_size_of_strided_dim(shrink, spec, size):
2005  """Computes the size of a single strided slice dimension."""
2006
2007  unknown = None  # Document what None means here.
2008  use_full_range = None  # Document other use of None.
2009  # if this is a shrink axis (i.e. a non-range index)
2010  # it either will produce an error or return 1
2011  if shrink:
2012    return 1
2013  if size is unknown or size.value is unknown:
2014    return unknown
2015  size = size.value
2016  stride = spec.step
2017  if stride is not unknown:
2018    if stride == 0:
2019      return unknown
2020    stride = spec.step
2021    valid_range = [0, size] if stride > 0 else [-1, size - 1]
2022
2023    # PEP-8 naming
2024    # pylint: disable=invalid-name
2025    def canonical(x, c):
2026      if x is use_full_range:
2027        return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
2028      else:
2029        x_fwd = size + x if x < 0 else x  # make negative indices positive
2030        return max(valid_range[0], min(valid_range[1], x_fwd))
2031
2032    begin = canonical(spec.start, 0)
2033    end = canonical(spec.stop, 1)
2034    interval_length = end - begin
2035    if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
2036      return 0
2037    else:
2038      remainder = 1 if interval_length % stride != 0 else 0
2039      return interval_length // stride + remainder
2040  else:
2041    return unknown  # unknown because stride is unknown
2042
2043
2044def _TileGradShape(op):
2045  """Shape function for the TileGrad op."""
2046  multiples_shape = op.inputs[1].get_shape().with_rank(1)
2047  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
2048  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
2049  # it is a vector of non-negative integers, and (ii) doing so allows
2050  # us to handle partially-known multiples.
2051  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
2052      input_shape.ndims)
2053  if multiples.ndims is None:
2054    return [tensor_shape.unknown_shape()]
2055  else:
2056    output_dims = []
2057    for dim, multiple in zip(input_shape.dims, multiples.dims):
2058      output_dims.append(dim // multiple)
2059    return [tensor_shape.TensorShape(output_dims)]
2060
2061
2062@tf_export("edit_distance")
2063def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
2064  """Computes the Levenshtein distance between sequences.
2065
2066  This operation takes variable-length sequences (`hypothesis` and `truth`),
2067  each provided as a `SparseTensor`, and computes the Levenshtein distance.
2068  You can normalize the edit distance by length of `truth` by setting
2069  `normalize` to true.
2070
2071  For example, given the following input:
2072
2073  ```python
2074  # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
2075  #   (0,0) = ["a"]
2076  #   (1,0) = ["b"]
2077  hypothesis = tf.SparseTensor(
2078      [[0, 0, 0],
2079       [1, 0, 0]],
2080      ["a", "b"],
2081      (2, 1, 1))
2082
2083  # 'truth' is a tensor of shape `[2, 2]` with variable-length values:
2084  #   (0,0) = []
2085  #   (0,1) = ["a"]
2086  #   (1,0) = ["b", "c"]
2087  #   (1,1) = ["a"]
2088  truth = tf.SparseTensor(
2089      [[0, 1, 0],
2090       [1, 0, 0],
2091       [1, 0, 1],
2092       [1, 1, 0]],
2093      ["a", "b", "c", "a"],
2094      (2, 2, 2))
2095
2096  normalize = True
2097  ```
2098
2099  This operation would return the following:
2100
2101  ```python
2102  # 'output' is a tensor of shape `[2, 2]` with edit distances normalized
2103  # by 'truth' lengths.
2104  output ==> [[inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
2105             [0.5, 1.0]]  # (1,0): addition, (1,1): no hypothesis
2106  ```
2107
2108  Args:
2109    hypothesis: A `SparseTensor` containing hypothesis sequences.
2110    truth: A `SparseTensor` containing truth sequences.
2111    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
2112      length of `truth.`
2113    name: A name for the operation (optional).
2114
2115  Returns:
2116    A dense `Tensor` with rank `R - 1`, where R is the rank of the
2117    `SparseTensor` inputs `hypothesis` and `truth`.
2118
2119  Raises:
2120    TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
2121  """
2122  if not isinstance(hypothesis, (sparse_tensor.SparseTensor,
2123                                 sparse_tensor.SparseTensorValue)):
2124    raise TypeError("Hypothesis must be a SparseTensor.")
2125  if not isinstance(truth, (sparse_tensor.SparseTensor,
2126                            sparse_tensor.SparseTensorValue)):
2127    raise TypeError("Truth must be a SparseTensor.")
2128
2129  return gen_array_ops._edit_distance(
2130      hypothesis.indices,
2131      hypothesis.values,
2132      hypothesis.dense_shape,
2133      truth.indices,
2134      truth.values,
2135      truth.dense_shape,
2136      normalize=normalize,
2137      name=name)
2138
2139
2140@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
2141def _FakeQuantWithMinMaxArgsGradient(op, grad):
2142  """Gradient for FakeQuantWithMinMaxArgs op."""
2143  return fake_quant_with_min_max_args_gradient(
2144      grad,
2145      op.inputs[0],
2146      min=op.get_attr("min"),
2147      max=op.get_attr("max"),
2148      num_bits=op.get_attr("num_bits"),
2149      narrow_range=op.get_attr("narrow_range"))
2150
2151
2152@ops.RegisterGradient("FakeQuantWithMinMaxVars")
2153def _FakeQuantWithMinMaxVarsGradient(op, grad):
2154  """Gradient for FakeQuantWithMinMaxVars op."""
2155  return fake_quant_with_min_max_vars_gradient(
2156      grad,
2157      op.inputs[0],
2158      op.inputs[1],
2159      op.inputs[2],
2160      num_bits=op.get_attr("num_bits"),
2161      narrow_range=op.get_attr("narrow_range"))
2162
2163
2164@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
2165def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
2166  """Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
2167  return fake_quant_with_min_max_vars_per_channel_gradient(
2168      grad,
2169      op.inputs[0],
2170      op.inputs[1],
2171      op.inputs[2],
2172      num_bits=op.get_attr("num_bits"),
2173      narrow_range=op.get_attr("narrow_range"))
2174
2175
2176@tf_export("required_space_to_batch_paddings")
2177def required_space_to_batch_paddings(input_shape,
2178                                     block_shape,
2179                                     base_paddings=None,
2180                                     name=None):
2181  """Calculate padding required to make block_shape divide input_shape.
2182
2183  This function can be used to calculate a suitable paddings argument for use
2184  with space_to_batch_nd and batch_to_space_nd.
2185
2186  Args:
2187    input_shape: int32 Tensor of shape [N].
2188    block_shape: int32 Tensor of shape [N].
2189    base_paddings: Optional int32 Tensor of shape [N, 2].  Specifies the minimum
2190      amount of padding to use.  All elements must be >= 0.  If not specified,
2191      defaults to 0.
2192    name: string.  Optional name prefix.
2193
2194  Returns:
2195    (paddings, crops), where:
2196
2197    `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
2198    satisfying:
2199
2200        paddings[i, 0] = base_paddings[i, 0].
2201        0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
2202        (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
2203
2204        crops[i, 0] = 0
2205        crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
2206
2207  Raises: ValueError if called with incompatible shapes.
2208  """
2209  with ops.name_scope(name, "required_space_to_batch_paddings",
2210                      [input_shape, block_shape]):
2211    input_shape = ops.convert_to_tensor(
2212        input_shape, dtype=dtypes.int32, name="input_shape")
2213    block_shape = ops.convert_to_tensor(
2214        block_shape, dtype=dtypes.int32, name="block_shape")
2215
2216    block_shape.get_shape().assert_is_fully_defined()
2217    block_shape.get_shape().assert_has_rank(1)
2218    num_block_dims = block_shape.get_shape()[0].value
2219    if num_block_dims == 0:
2220      return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
2221
2222    input_shape.get_shape().assert_is_compatible_with([num_block_dims])
2223
2224    if base_paddings is not None:
2225      base_paddings = ops.convert_to_tensor(
2226          base_paddings, dtype=dtypes.int32, name="base_paddings")
2227      base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
2228    else:
2229      base_paddings = zeros([num_block_dims, 2], dtypes.int32)
2230
2231    const_block_shape = tensor_util.constant_value(block_shape)
2232    const_input_shape = tensor_util.constant_value(input_shape)
2233    const_base_paddings = tensor_util.constant_value(base_paddings)
2234    if (const_block_shape is not None and const_input_shape is not None and
2235        const_base_paddings is not None):
2236      block_shape = const_block_shape
2237      input_shape = const_input_shape
2238      base_paddings = const_base_paddings
2239
2240    # Use same expression for both constant and non-constant case.
2241    pad_start = base_paddings[:, 0]
2242    orig_pad_end = base_paddings[:, 1]
2243    full_input_shape = input_shape + pad_start + orig_pad_end
2244    pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
2245    pad_end = orig_pad_end + pad_end_extra
2246
2247    result_paddings = stack(
2248        [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
2249        name="paddings")
2250    result_crops = stack(
2251        [[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
2252    return result_paddings, result_crops
2253
2254
2255@tf_export("space_to_batch")
2256def space_to_batch(input, paddings, block_size, name=None):  # pylint: disable=redefined-builtin
2257  result = space_to_batch_nd(
2258      input,
2259      paddings=paddings,
2260      block_shape=np.array([block_size, block_size], dtype=np.int64),
2261      name=name)
2262  result.set_shape(result.get_shape().with_rank(4))
2263  return result
2264
2265
2266space_to_batch.__doc__ = gen_array_ops._space_to_batch.__doc__
2267
2268
2269@tf_export("space_to_depth")
2270def space_to_depth(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
2271  return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
2272
2273
2274space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
2275
2276
2277@tf_export("depth_to_space")
2278def depth_to_space(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
2279  return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
2280
2281
2282depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
2283
2284
2285@tf_export("batch_to_space")
2286def batch_to_space(input, crops, block_size, name=None):  # pylint: disable=redefined-builtin
2287  result = batch_to_space_nd(
2288      input,
2289      crops=crops,
2290      block_shape=np.array([block_size, block_size], dtype=np.int64),
2291      name=name)
2292  result.set_shape(result.get_shape().with_rank(4))
2293  return result
2294
2295
2296batch_to_space.__doc__ = gen_array_ops._batch_to_space.__doc__
2297
2298
2299@tf_export("one_hot")
2300def one_hot(indices,
2301            depth,
2302            on_value=None,
2303            off_value=None,
2304            axis=None,
2305            dtype=None,
2306            name=None):
2307  """Returns a one-hot tensor.
2308
2309  The locations represented by indices in `indices` take value `on_value`,
2310  while all other locations take value `off_value`.
2311
2312  `on_value` and `off_value` must have matching data types. If `dtype` is also
2313  provided, they must be the same data type as specified by `dtype`.
2314
2315  If `on_value` is not provided, it will default to the value `1` with type
2316  `dtype`
2317
2318  If `off_value` is not provided, it will default to the value `0` with type
2319  `dtype`
2320
2321  If the input `indices` is rank `N`, the output will have rank `N+1`. The
2322  new axis is created at dimension `axis` (default: the new axis is appended
2323  at the end).
2324
2325  If `indices` is a scalar the output shape will be a vector of length `depth`
2326
2327  If `indices` is a vector of length `features`, the output shape will be:
2328
2329  ```
2330    features x depth if axis == -1
2331    depth x features if axis == 0
2332  ```
2333
2334  If `indices` is a matrix (batch) with shape `[batch, features]`, the output
2335  shape will be:
2336
2337  ```
2338    batch x features x depth if axis == -1
2339    batch x depth x features if axis == 1
2340    depth x batch x features if axis == 0
2341  ```
2342
2343  If `dtype` is not provided, it will attempt to assume the data type of
2344  `on_value` or `off_value`, if one or both are passed in. If none of
2345  `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
2346  value `tf.float32`.
2347
2348  Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
2349  etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
2350
2351  For example:
2352
2353  ```python
2354  indices = [0, 1, 2]
2355  depth = 3
2356  tf.one_hot(indices, depth)  # output: [3 x 3]
2357  # [[1., 0., 0.],
2358  #  [0., 1., 0.],
2359  #  [0., 0., 1.]]
2360
2361  indices = [0, 2, -1, 1]
2362  depth = 3
2363  tf.one_hot(indices, depth,
2364             on_value=5.0, off_value=0.0,
2365             axis=-1)  # output: [4 x 3]
2366  # [[5.0, 0.0, 0.0],  # one_hot(0)
2367  #  [0.0, 0.0, 5.0],  # one_hot(2)
2368  #  [0.0, 0.0, 0.0],  # one_hot(-1)
2369  #  [0.0, 5.0, 0.0]]  # one_hot(1)
2370
2371  indices = [[0, 2], [1, -1]]
2372  depth = 3
2373  tf.one_hot(indices, depth,
2374             on_value=1.0, off_value=0.0,
2375             axis=-1)  # output: [2 x 2 x 3]
2376  # [[[1.0, 0.0, 0.0],   # one_hot(0)
2377  #   [0.0, 0.0, 1.0]],  # one_hot(2)
2378  #  [[0.0, 1.0, 0.0],   # one_hot(1)
2379  #   [0.0, 0.0, 0.0]]]  # one_hot(-1)
2380  ```
2381
2382  Args:
2383    indices: A `Tensor` of indices.
2384    depth: A scalar defining the depth of the one hot dimension.
2385    on_value: A scalar defining the value to fill in output when `indices[j]
2386      = i`. (default: 1)
2387    off_value: A scalar defining the value to fill in output when `indices[j]
2388      != i`. (default: 0)
2389    axis: The axis to fill (default: -1, a new inner-most axis).
2390    dtype: The data type of the output tensor.
2391    name: A name for the operation (optional).
2392
2393  Returns:
2394    output: The one-hot tensor.
2395
2396  Raises:
2397    TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`
2398    TypeError: If dtype of `on_value` and `off_value` don't match one another
2399  """
2400  with ops.name_scope(name, "one_hot",
2401                      [indices, depth, on_value, off_value, axis,
2402                       dtype]) as name:
2403    on_exists = on_value is not None
2404    off_exists = off_value is not None
2405
2406    on_dtype = (ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists
2407                else None)
2408    off_dtype = (ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists
2409                 else None)
2410
2411    if on_exists or off_exists:
2412      if dtype is not None:
2413        # Ensure provided on_value and/or off_value match dtype
2414        if on_exists and on_dtype != dtype:
2415          raise TypeError("dtype {0} of on_value does not match "
2416                          "dtype parameter {1}".format(on_dtype, dtype))
2417        if off_exists and off_dtype != dtype:
2418          raise TypeError("dtype {0} of off_value does not match "
2419                          "dtype parameter {1}".format(off_dtype, dtype))
2420      else:
2421        # dtype not provided: automatically assign it
2422        dtype = on_dtype if on_exists else off_dtype
2423    elif dtype is None:
2424      # None of on_value, off_value, or dtype provided. Default dtype to float32
2425      dtype = dtypes.float32
2426
2427    if not on_exists:
2428      # on_value not provided: assign to value 1 of type dtype
2429      on_value = ops.convert_to_tensor(1, dtype, name="on_value")
2430      on_dtype = dtype
2431    if not off_exists:
2432      # off_value not provided: assign to value 0 of type dtype
2433      off_value = ops.convert_to_tensor(0, dtype, name="off_value")
2434      off_dtype = dtype
2435
2436    if on_dtype != off_dtype:
2437      raise TypeError("dtype {0} of on_value does not match "
2438                      "dtype {1} of off_value".format(on_dtype, off_dtype))
2439
2440    return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
2441                                  name)
2442
2443
2444def _all_dimensions(x):
2445  """Returns a 1D-tensor listing all dimensions in x."""
2446  # Fast path: avoid creating Rank and Range ops if ndims is known.
2447  if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
2448    return constant_op.constant(
2449        np.arange(x.get_shape().ndims), dtype=dtypes.int32)
2450  if (isinstance(x, sparse_tensor.SparseTensor) and
2451      x.dense_shape.get_shape().is_fully_defined()):
2452    r = x.dense_shape.get_shape()[0].value  # sparse.dense_shape is 1-D.
2453    return constant_op.constant(np.arange(r), dtype=dtypes.int32)
2454
2455  # Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
2456  return gen_math_ops._range(0, rank(x), 1)
2457
2458
2459@tf_export("sequence_mask")
2460def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
2461  """Returns a mask tensor representing the first N positions of each cell.
2462
2463  If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
2464  dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
2465
2466  ```
2467  mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
2468  ```
2469
2470  Examples:
2471
2472  ```python
2473  tf.sequence_mask([1, 3, 2], 5)  # [[True, False, False, False, False],
2474                                  #  [True, True, True, False, False],
2475                                  #  [True, True, False, False, False]]
2476
2477  tf.sequence_mask([[1, 3],[2,0]])  # [[[True, False, False],
2478                                    #   [True, True, True]],
2479                                    #  [[True, True, False],
2480                                    #   [False, False, False]]]
2481  ```
2482
2483  Args:
2484    lengths: integer tensor, all its values <= maxlen.
2485    maxlen: scalar integer tensor, size of last dimension of returned tensor.
2486      Default is the maximum value in `lengths`.
2487    dtype: output type of the resulting tensor.
2488    name: name of the op.
2489  Returns:
2490    A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
2491  Raises:
2492    ValueError: if `maxlen` is not a scalar.
2493  """
2494  with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
2495    lengths = ops.convert_to_tensor(lengths)
2496
2497    if maxlen is None:
2498      maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
2499    else:
2500      maxlen = ops.convert_to_tensor(maxlen)
2501    if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
2502      raise ValueError("maxlen must be scalar for sequence_mask")
2503
2504    # The basic idea is to compare a range row vector of size maxlen:
2505    # [0, 1, 2, 3, 4]
2506    # to length as a matrix with 1 column: [[1], [3], [2]].
2507    # Because of broadcasting on both arguments this comparison results
2508    # in a matrix of size (len(lengths), maxlen)
2509    row_vector = gen_math_ops._range(
2510        constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
2511    # Since maxlen >= max(lengths), it is safe to use maxlen as a cast
2512    # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.
2513    matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
2514    result = row_vector < matrix
2515
2516    if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
2517      return result
2518    else:
2519      return gen_math_ops.cast(result, dtype)
2520
2521
2522@tf_export("squeeze")
2523def squeeze(input, axis=None, name=None, squeeze_dims=None):
2524  # pylint: disable=redefined-builtin
2525  """Removes dimensions of size 1 from the shape of a tensor.
2526
2527  Given a tensor `input`, this operation returns a tensor of the same type with
2528  all dimensions of size 1 removed. If you don't want to remove all size 1
2529  dimensions, you can remove specific size 1 dimensions by specifying
2530  `axis`.
2531
2532  For example:
2533
2534  ```python
2535  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
2536  tf.shape(tf.squeeze(t))  # [2, 3]
2537  ```
2538
2539  Or, to remove specific size 1 dimensions:
2540
2541  ```python
2542  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
2543  tf.shape(tf.squeeze(t, [2, 4]))  # [1, 2, 3, 1]
2544  ```
2545
2546  Args:
2547    input: A `Tensor`. The `input` to squeeze.
2548    axis: An optional list of `ints`. Defaults to `[]`.
2549      If specified, only squeezes the dimensions listed. The dimension
2550      index starts at 0. It is an error to squeeze a dimension that is not 1.
2551      Must be in the range `[-rank(input), rank(input))`.
2552    name: A name for the operation (optional).
2553    squeeze_dims: Deprecated keyword argument that is now axis.
2554
2555  Returns:
2556    A `Tensor`. Has the same type as `input`.
2557    Contains the same data as `input`, but has one or more dimensions of
2558    size 1 removed.
2559
2560  Raises:
2561    ValueError: When both `squeeze_dims` and `axis` are specified.
2562  """
2563  if squeeze_dims is not None:
2564    if axis is not None:
2565      raise ValueError("Cannot specify both 'squeeze_dims' and 'axis'")
2566    axis = squeeze_dims
2567  if np.isscalar(axis):
2568    axis = [axis]
2569  return gen_array_ops._squeeze(input, axis, name)
2570
2571
2572@tf_export("where")
2573def where(condition, x=None, y=None, name=None):
2574  """Return the elements, either from `x` or `y`, depending on the `condition`.
2575
2576  If both `x` and `y` are None, then this operation returns the coordinates of
2577  true elements of `condition`.  The coordinates are returned in a 2-D tensor
2578  where the first dimension (rows) represents the number of true elements, and
2579  the second dimension (columns) represents the coordinates of the true
2580  elements. Keep in mind, the shape of the output tensor can vary depending on
2581  how many true values there are in input. Indices are output in row-major
2582  order.
2583
2584  If both non-None, `x` and `y` must have the same shape.
2585  The `condition` tensor must be a scalar if `x` and `y` are scalar.
2586  If `x` and `y` are vectors of higher rank, then `condition` must be either a
2587  vector with size matching the first dimension of `x`, or must have the same
2588  shape as `x`.
2589
2590  The `condition` tensor acts as a mask that chooses, based on the value at each
2591  element, whether the corresponding element / row in the output should be taken
2592  from `x` (if true) or `y` (if false).
2593
2594  If `condition` is a vector and `x` and `y` are higher rank matrices, then it
2595  chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
2596  has the same shape as `x` and `y`, then it chooses which element to copy from
2597  `x` and `y`.
2598
2599  Args:
2600    condition: A `Tensor` of type `bool`
2601    x: A Tensor which may have the same shape as `condition`. If `condition` is
2602      rank 1, `x` may have higher rank, but its first dimension must match the
2603      size of `condition`.
2604    y: A `tensor` with the same shape and type as `x`.
2605    name: A name of the operation (optional)
2606
2607  Returns:
2608    A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
2609    A `Tensor` with shape `(num_true, dim_size(condition))`.
2610
2611  Raises:
2612    ValueError: When exactly one of `x` or `y` is non-None.
2613  """
2614  if x is None and y is None:
2615    with ops.name_scope(name, "Where", [condition]) as name:
2616      condition = ops.convert_to_tensor(
2617          condition, preferred_dtype=dtypes.bool, name="condition")
2618      return gen_array_ops.where(condition=condition, name=name)
2619  elif x is not None and y is not None:
2620    return gen_math_ops._select(condition=condition, x=x, y=y, name=name)
2621  else:
2622    raise ValueError("x and y must both be non-None or both be None.")
2623
2624
2625@tf_export("reverse")
2626def reverse(tensor, axis, name=None):
2627  return gen_array_ops.reverse_v2(tensor, axis, name)
2628
2629
2630reverse.__doc__ = gen_array_ops.reverse_v2.__doc__
2631
2632
2633# pylint: disable=redefined-builtin
2634@tf_export("reverse_sequence")
2635def reverse_sequence(input,
2636                     seq_lengths,
2637                     seq_axis=None,
2638                     batch_axis=None,
2639                     name=None,
2640                     seq_dim=None,
2641                     batch_dim=None):
2642  seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
2643                                                    "seq_dim", seq_dim)
2644  batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
2645                                                      "batch_dim", batch_dim)
2646  return gen_array_ops.reverse_sequence(
2647      input=input,
2648      seq_lengths=seq_lengths,
2649      seq_dim=seq_axis,
2650      batch_dim=batch_axis,
2651      name=name)
2652
2653
2654# pylint: enable=redefined-builtin
2655
2656reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
2657    deprecation.rewrite_argument_docstring(
2658        gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
2659    "seq_dim", "seq_axis")
2660
2661
2662@tf_export("gather")
2663def gather(params, indices, validate_indices=None, name=None, axis=0):
2664  # TODO(rjryan): Remove "Gather" creation in favor of GatherV2 once the forward
2665  # compatibility 3 week period has passed.
2666  if axis == 0:
2667    return gen_array_ops.gather(
2668        params, indices, validate_indices=validate_indices, name=name)
2669  else:
2670    return gen_array_ops.gather_v2(params, indices, axis, name=name)
2671
2672
2673gather.__doc__ = gen_array_ops.gather_v2.__doc__
2674
2675
2676# Define quantize_v2 here in order to make name the second-to-last attribute,
2677# because round_mode was added later.
2678@tf_export("quantize_v2")
2679@deprecation.deprecated(
2680    "2017-10-25",
2681    "`tf.quantize_v2` is deprecated, please use `tf.quantize` instead.")
2682def quantize_v2(input,  # pylint: disable=redefined-builtin
2683                min_range,
2684                max_range,
2685                T,
2686                mode="MIN_COMBINED",
2687                name=None,
2688                round_mode="HALF_AWAY_FROM_ZERO"):
2689  return gen_array_ops.quantize_v2(input,
2690                                   min_range,
2691                                   max_range,
2692                                   T=T,
2693                                   mode=mode,
2694                                   name=name,
2695                                   round_mode=round_mode)
2696
2697
2698quantize_v2.__doc__ = """Please use `tf.quantize` instead."""
2699
2700
2701# We want to expose tf.quantize instead of tf.quantize_v2; we can deprecate
2702# tf.quantize_v2 in next version of TensorFlow.
2703@tf_export("quantize")
2704def quantize(input,  # pylint: disable=redefined-builtin
2705             min_range,
2706             max_range,
2707             T,
2708             mode="MIN_COMBINED",
2709             round_mode="HALF_AWAY_FROM_ZERO",
2710             name=None):
2711  return gen_array_ops.quantize_v2(
2712      input,
2713      min_range,
2714      max_range,
2715      T,
2716      mode=mode,
2717      round_mode=round_mode,
2718      name=name)
2719
2720
2721quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
2722