nn_grad_test.cc revision 040b4cbce7084b8340e075af4797f81359c2bbf4
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"

namespace tensorflow {
namespace {

using ops::BiasAdd;
using ops::Conv2D;
using ops::Elu;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
using ops::MaxPool;
using ops::MaxPoolV2;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
using ops::Selu;
using ops::Softmax;

class NNGradTest : public ::testing::Test {
 protected:
  NNGradTest() : scope_(Scope::NewRootScope()) {}

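  // Each RunTest overload runs the gradient checker: it compares the symbolic
  // gradient produced by the registered gradient functions against a numeric
  // finite-difference gradient and asserts that the maximum elementwise error
  // is below 1e-3.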
  void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
               const TensorShape& y_shape) {
    float max_error;
    TF_ASSERT_OK((ComputeGradientError<float, float, float>(
        scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
    EXPECT_LT(max_error, 1e-3);
  }

  void RunTest(const Output& x, const Tensor& x_init_value, const Output& y,
               const TensorShape& y_shape) {
    float max_error;
    TF_ASSERT_OK((ComputeGradientError<float, float, float>(
        scope_, x, x_init_value, y, y_shape, &max_error)));
    EXPECT_LT(max_error, 1e-3);
  }

  void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
               const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
    TF_ASSERT_OK(scope_.status());
    float max_error;
    TF_ASSERT_OK((ComputeGradientError<float, float, float>(
        scope_, xs, x_shapes, ys, y_shapes, &max_error)));
    EXPECT_LT(max_error, 1e-3);
  }

  // Fills the tensor with random values, then bumps the maximum entry so that
  // it is largest by a clear margin. This matters for MaxPool and MaxPoolV2:
  // if values are too close together, the perturbations applied by the
  // numeric gradient computation in the gradient checker can change which
  // element is the max, invalidating the comparison against the symbolic
  // gradient.
  template <typename T>
  void SetRandomValuesWithBumpedMax(Tensor* tensor) {
    auto tensor_flat = tensor->flat<T>();
    tensor_flat.setRandom();
    // NumElements() returns an int64, so index with int64 as well.
    int64 max_index = 0;
    for (int64 i = 1; i < tensor->NumElements(); i++) {
      if (tensor_flat(i) > tensor_flat(max_index)) {
        max_index = i;
      }
    }
    tensor_flat(max_index) += 1e-2;
  }

  Scope scope_;
};

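// Softmax is computed along the last dimension, so the output shape matches
// the input shape.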
TEST_F(NNGradTest, SoftmaxGrad) {
  TensorShape shape({32, 10});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Softmax(scope_, x);
  RunTest(x, shape, y, shape);
}

TEST_F(NNGradTest, LogSoftmaxGrad) {
  TensorShape shape({5, 3});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = LogSoftmax(scope_, x);
  // Avoid numerical instability when computing finite differences.
  Tensor x_init_value =
      test::AsTensor<float>({-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f,
                             0.5f, 0.7f, 0.8f, -0.1f, 0.1f, 0.1f, 0.1f, 1.2f},
                            {5, 3});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, ReluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Relu(scope_, x);
  // Avoid input values where ReLU gradient is not well defined (around zero).
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, Relu6Grad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Relu6(scope_, x);
  // Avoid input values where the Relu6 gradient is not well defined (around
  // zero and six, the two kinks of the function).
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 6.1f, 6.3f, 6.5f, 6.7f, 6.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

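// Elu(x) = x for x > 0 and alpha * (exp(x) - 1) for x <= 0, with alpha = 1 by
// default. Use fixed inputs that cover both branches.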
TEST_F(NNGradTest, EluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Elu(scope_, x);
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

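// Selu is Elu scaled by fixed constants (scale ~= 1.0507, alpha ~= 1.6733);
// the same fixed inputs exercise both the linear and exponential branches.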
TEST_F(NNGradTest, SeluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Selu(scope_, x);
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
      {5, 2});
  RunTest(x, x_init_value, y, shape);
}

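// L2Loss(x) = sum(x^2) / 2 reduces the whole input to a single value, hence
// the single-element output shape.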
TEST_F(NNGradTest, L2LossGrad) {
  TensorShape x_shape({5, 2});
  TensorShape y_shape({1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  auto y = L2Loss(scope_, x);
  RunTest(x, x_shape, y, y_shape);
}

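// BiasAdd broadcasts a 1-D bias over the last dimension of x. The multi-input
// RunTest overload checks the gradients with respect to both x and bias.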
TEST_F(NNGradTest, BiasAddGradHelper) {
  TensorShape shape({4, 5});
  TensorShape bias_shape({5});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto bias = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(bias_shape));
  auto y = BiasAdd(scope_, x, bias);
  RunTest({x, bias}, {shape, bias_shape}, {y}, {shape});
}

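// A single 1x1 filter with unit strides and SAME padding leaves the output
// shape equal to the input shape.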
TEST_F(NNGradTest, Conv2DGrad) {
  TensorShape shape({1, 2, 2, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  Tensor filter = test::AsTensor<float>({0.5f}, {1, 1, 1, 1});
  const std::vector<int> strides{1, 1, 1, 1};
  auto y = Conv2D(scope_, x, filter, strides, "SAME");
  RunTest(x, shape, y, shape);
}

TEST_F(NNGradTest, MaxPoolGradHelper) {
  TensorShape x_shape({1, 2, 2, 1});
  TensorShape y_shape({1, 1, 1, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Set up the window and strides so that a single pooling window covers the
  // whole input, producing one output element.
  const std::vector<int> ksize{1, 2, 2, 1};
  const std::vector<int> strides{1, 2, 2, 1};
  auto y = MaxPool(scope_, x, ksize, strides, "VALID");
  Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
  SetRandomValuesWithBumpedMax<float>(&x_init_value);
  RunTest(x, x_init_value, y, y_shape);
}

TEST_F(NNGradTest, MaxPoolGradV2Helper) {
  TensorShape x_shape({1, 2, 2, 1});
  TensorShape y_shape({1, 1, 1, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Set up the window and strides so that a single pooling window covers the
  // whole input, producing one output element.
  Tensor ksize = test::AsTensor<int>({1, 2, 2, 1}, {4});
  Tensor strides = test::AsTensor<int>({1, 2, 2, 1}, {4});
  auto y = MaxPoolV2(scope_, x, ksize, strides, "VALID");
  Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
  SetRandomValuesWithBumpedMax<float>(&x_init_value);
  RunTest(x, x_init_value, y, y_shape);
}

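// LRN (local response normalization) scales each element by a function of the
// squared values in a window around its depth channel; the output shape
// matches the input shape.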
TEST_F(NNGradTest, LRN) {
  TensorShape x_shape({1, 1, 2, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  auto y = LRN(scope_, x);
  RunTest(x, x_shape, y, x_shape);
}

}  // namespace
}  // namespace tensorflow