data_flow_grad.py revision 7760ce56fc3ab4ab8cdc408e29d8ad8b539c417e
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Gradients for operators defined in data_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops


@ops.RegisterGradient("DynamicPartition")
def _DynamicPartitionGrads(op, *grads):
  """Gradients for DynamicPartition."""
  data = op.inputs[0]
  indices = op.inputs[1]
  num_partitions = op.get_attr("num_partitions")

  prefix_shape = array_ops.shape(indices)
  original_indices = array_ops.reshape(
      math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)
  partitioned_indices = data_flow_ops.dynamic_partition(
      original_indices, indices, num_partitions)
  reconstructed = data_flow_ops.dynamic_stitch(partitioned_indices, grads)
  reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))
  return [reconstructed, None]


@ops.RegisterGradient("DynamicStitch")
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch."""

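  # dynamic_stitch(indices, values) writes values[i][j] to row indices[i][j]
  # of its output, so the gradient for each `values` input is the incoming
  # gradient gathered at that input's indices; the integer `indices` inputs
  # receive no gradient. A sparse incoming gradient (IndexedSlices) is first
  # densified with unsorted_segment_sum so that it can be gathered from.
  #
  # Illustrative example (values are hypothetical):
  #   indices = [[0, 2], [1, 3]], values = [[a, c], [b, d]]
  #   forward output: [a, b, c, d]
  #   grad = [ga, gb, gc, gd]
  #   values_grad = [gather(grad, [0, 2]), gather(grad, [1, 3])]
  #               = [[ga, gc], [gb, gd]]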
  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad


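# Queue and Stack operations are stateful data-flow ops; they are registered
# below as having no gradient.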
ops.NoGradient("Queue")
ops.NoGradient("QueueEnqueue")
ops.NoGradient("QueueEnqueueMany")
ops.NoGradient("QueueDequeue")
ops.NoGradient("QueueDequeueMany")
ops.NoGradient("QueueClose")
ops.NoGradient("QueueSize")

ops.NoGradient("Stack")
ops.NoGradient("StackPush")
ops.NoGradient("StackPop")
ops.NoGradient("StackClose")