data_flow_grad.py revision f06f18e57ff17297e6e20c889d51f141f841496f
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

16"""Gradients for operators defined in data_flow_ops.py."""
17from __future__ import absolute_import
18from __future__ import division
19from __future__ import print_function
20
21from six.moves import xrange  # pylint: disable=redefined-builtin
22
23from tensorflow.python.framework import dtypes
24from tensorflow.python.framework import ops
25from tensorflow.python.ops import array_ops
26from tensorflow.python.ops import data_flow_ops
27from tensorflow.python.ops import math_ops
28
29
@ops.RegisterGradient("DynamicPartition")
def _DynamicPartitionGrads(op, *grads):
  """Gradients for DynamicPartition."""
  data = op.inputs[0]
  indices = op.inputs[1]
  num_partitions = op.get_attr("num_partitions")

  # Number each position of `data` along the partitioned prefix dimensions,
  # then partition those position numbers exactly as the forward op
  # partitioned `data`. Stitching the incoming per-partition gradients back
  # together with those positions reassembles a gradient with the original
  # layout of `data`. There is no gradient with respect to `indices`.
  prefix_shape = array_ops.shape(indices)
  original_indices = array_ops.reshape(
      math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)
  partitioned_indices = data_flow_ops.dynamic_partition(
      original_indices, indices, num_partitions)
  reconstructed = data_flow_ops.dynamic_stitch(partitioned_indices, grads)
  reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))
  return [reconstructed, None]

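# Worked example of the reconstruction above (illustrative only, not part of
# the original file): for data = [10., 20., 30., 40.] and
# indices = [0, 1, 0, 1] with num_partitions = 2, the forward op emits
# [[10., 30.], [20., 40.]]. Here original_indices = [0, 1, 2, 3] is
# partitioned the same way into [[0, 2], [1, 3]], so dynamic_stitch scatters
# grads[0] back to positions 0 and 2 and grads[1] back to positions 1 and 3,
# recovering a gradient with the same layout as data.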

@ops.RegisterGradient("DynamicStitch")
@ops.RegisterGradient("ParallelDynamicStitch")
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch and ParallelDynamicStitch."""

  # The op's inputs are `num_values` index tensors followed by `num_values`
  # data tensors. The integer index inputs are not differentiable.
  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    # Densify a sparse upstream gradient so it can be gathered below;
    # unsorted_segment_sum also accumulates duplicate indices.
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  # The gradient for each data input is the slice of the upstream gradient
  # at the output positions that input was stitched into.
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad

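# Worked example (illustrative only, not part of the original file): with
# indices = [[0, 2], [1, 3]] and data inputs x0, x1, the forward op builds
# y = [x0[0], x1[0], x0[1], x1[1]]. The gradient for x0 is therefore
# gather(grad, [0, 2]) and the gradient for x1 is gather(grad, [1, 3]),
# while the index inputs receive None.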

ops.NotDifferentiable("Queue")
ops.NotDifferentiable("QueueEnqueue")
ops.NotDifferentiable("QueueEnqueueMany")
ops.NotDifferentiable("QueueDequeue")
ops.NotDifferentiable("QueueDequeueMany")
ops.NotDifferentiable("QueueDequeueUpTo")
ops.NotDifferentiable("QueueClose")
ops.NotDifferentiable("QueueSize")

ops.NotDifferentiable("Stack")
ops.NotDifferentiable("StackPush")
ops.NotDifferentiable("StackPop")
ops.NotDifferentiable("StackClose")

ops.NotDifferentiable("GetSessionHandle")
ops.NotDifferentiable("GetSessionHandleV2")
ops.NotDifferentiable("GetSessionTensor")
ops.NotDifferentiable("DeleteSessionTensor")

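# Illustrative usage sketch (not part of this module): once this file is
# imported, the registrations above are picked up automatically by the
# gradient machinery, e.g. in graph mode:
#
#   data = tf.constant([1., 2., 3., 4.])
#   parts = tf.dynamic_partition(data, [0, 1, 0, 1], num_partitions=2)
#   merged = tf.dynamic_stitch([[0, 2], [1, 3]], parts)
#   grad, = tf.gradients(merged, [data])  # uses the gradient functions above
#
# Ops marked with ops.NotDifferentiable (queues, stacks, session handles)
# contribute None gradients during backprop rather than raising an error
# about a missing gradient registration.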