nn_grad.cc revision 4957f8a8de587ff12d367ead7d91479005487a08
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"

#include "tensorflow/cc/framework/grad_op_registry.h"

namespace tensorflow {
namespace ops {
namespace {

Status SoftmaxGrad(const Scope& scope, const Operation& op,
                   const std::vector<Output>& grad_inputs,
                   std::vector<Output>* grad_outputs) {
  // Softmax gradient function.
  // y = softmax(x) maps [batch, n] to [batch, n]; for each example the
  // Jacobian is the n x n matrix
  // dy/dx = [dy0/dx0   ... dy0/dxn-1  ]
  //         [  ...           ...      ]
  //         [dyn-1/dx0 ... dyn-1/dxn-1]
  // and dL/dx = (dy/dx)^T * dL/dy. The softmax Jacobian is symmetric, so the
  // transpose can be dropped.
  //
  // Rather than materializing the Jacobian, use the equivalent formula:
  // dL/dx = dL/dy * y - sum(dL/dy * y) * y
  //       = (dL/dy - sum(dL/dy * y)) * y
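  // Derivation (per example, for component j of x):
  //   dy_i/dx_j = y_i * (delta_ij - y_j)
  //   dL/dx_j   = sum_i dL/dy_i * dy_i/dx_j
  //             = dL/dy_j * y_j - y_j * sum_i dL/dy_i * y_i
  //             = (dL/dy_j - sum_i dL/dy_i * y_i) * y_j
  // which is the elementwise computation below, with the sum taken row-wise
  // and broadcast back over the class dimension.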
  auto y = op.output(0);
  auto dyy = Mul(scope, grad_inputs[0], y);
  auto sum = Reshape(scope, Sum(scope, dyy, {1}), {-1, 1});
  auto sub = Sub(scope, grad_inputs[0], sum);
  auto dx = Mul(scope, sub, y);
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Softmax", SoftmaxGrad);
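// Usage sketch: the functions in this file are not called directly. Each
// REGISTER_GRADIENT_OP entry is looked up by op name when a backward graph is
// built, e.g. via AddSymbolicGradients from
// tensorflow/cc/framework/gradients.h. A minimal sketch (variable names are
// illustrative only):
//
//   Scope scope = Scope::NewRootScope();
//   auto x = Placeholder(scope, DT_FLOAT);
//   auto y = Softmax(scope, x);
//   std::vector<Output> grads;
//   TF_CHECK_OK(AddSymbolicGradients(scope, {y}, {x}, &grads));
//   // grads[0] is dL/dx as computed by SoftmaxGrad above, seeded with
//   // dL/dy = 1 (the overload without grad_inputs uses OnesLike).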

// The Relu gradient is a mask on the incoming gradient: internal::ReluGrad
// passes grad_inputs[0] through where the forward input op.input(0) is
// positive and produces zeros elsewhere.
Status ReluGradHelper(const Scope& scope, const Operation& op,
                      const std::vector<Output>& grad_inputs,
                      std::vector<Output>* grad_outputs) {
  auto dx = internal::ReluGrad(scope, grad_inputs[0], op.input(0));
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Relu", ReluGradHelper);

// Relu6 clamps its input to [0, 6], so internal::Relu6Grad passes
// grad_inputs[0] through only where the forward input op.input(0) lies in the
// linear region between 0 and 6, and produces zeros elsewhere.
Status Relu6GradHelper(const Scope& scope, const Operation& op,
                       const std::vector<Output>& grad_inputs,
                       std::vector<Output>* grad_outputs) {
  auto dx = internal::Relu6Grad(scope, grad_inputs[0], op.input(0));
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Relu6", Relu6GradHelper);

// Elu's derivative can be recovered from its output alone: for x < 0,
// d/dx elu(x) = exp(x) = elu(x) + 1, and for x >= 0 it is 1. This is why
// internal::EluGrad is given op.output(0) rather than the forward input.
Status EluGradHelper(const Scope& scope, const Operation& op,
                     const std::vector<Output>& grad_inputs,
                     std::vector<Output>* grad_outputs) {
  auto dx = internal::EluGrad(scope, grad_inputs[0], op.output(0));
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Elu", EluGradHelper);

}  // anonymous namespace
}  // namespace ops
}  // namespace tensorflow