1/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7    http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
14==============================================================================*/
15#include <cstdarg>
16#include <gtest/gtest.h>
17#include "tensorflow/contrib/lite/interpreter.h"
18#include "tensorflow/contrib/lite/kernels/register.h"
19#include "tensorflow/contrib/lite/kernels/test_util.h"
20#include "tensorflow/contrib/lite/model.h"
21
22namespace tflite {
23namespace {
24
25using ::testing::ElementsAreArray;
26
27class BaseActivationsOpModel : public SingleOpModel {
28 public:
29  // Most activations don't take any options, so this constructor works for
30  // them.
31  BaseActivationsOpModel(BuiltinOperator type, TensorData input) {
32    input_ = AddInput(input);
33    if (input.type == TensorType_UINT8) {
34      output_ = AddOutput({input.type, {}, 0, 0, 1. / 256});
35    } else {
36      output_ = AddOutput({input.type, {}});
37    }
38    SetBuiltinOp(type, BuiltinOptions_NONE, 0);
39    BuildInterpreter({GetShape(input_)});
40  }
41
42  // A dedicated constructor for SOFTMAX, which does some options.
43  BaseActivationsOpModel(float softmax_beta, TensorData input) {
44    input_ = AddInput(input);
45    if (input.type == TensorType_UINT8) {
46      output_ = AddOutput({input.type, {}, 0, 0, 1. / 256});
47    } else {
48      output_ = AddOutput({input.type, {}});
49    }
50    SetBuiltinOp(BuiltinOperator_SOFTMAX, BuiltinOptions_SoftmaxOptions,
51                 CreateSoftmaxOptions(builder_, softmax_beta).Union());
52    BuildInterpreter({GetShape(input_)});
53  }
54
55 protected:
56  int input_;
57  int output_;
58};
59
60class FloatActivationsOpModel : public BaseActivationsOpModel {
61 public:
62  using BaseActivationsOpModel::BaseActivationsOpModel;
63
64  void SetInput(std::initializer_list<float> data) {
65    PopulateTensor(input_, data);
66  }
67  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
68};
69
// Allowed error for dequantized outputs: twice the output scale (1/256).
// TODO(ahentz): I don't quite understand the tradeoffs in the quantized
// implementation of sigmoid and software, but a tolerance of twice the output
// scale seems reasonable. We might want to change this if we have a better
// theoretical bound.
const float kQuantizedTolerance = 2.0f / 256.0f;
75
76class QuantizedActivationsOpModel : public BaseActivationsOpModel {
77 public:
78  using BaseActivationsOpModel::BaseActivationsOpModel;
79
80  void SetInput(std::initializer_list<float> data) {
81    QuantizeAndPopulate<uint8_t>(input_, data);
82  }
83  std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }
84  std::vector<float> GetDequantizedOutput() {
85    return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
86                               GetScale(output_), GetZeroPoint(output_));
87  }
88};
89
// RELU: max(0, x) — negative inputs clamp to zero, the rest pass through.
TEST(FloatActivationsOpTest, Relu) {
  FloatActivationsOpModel m(BuiltinOperator_RELU,
                            /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}});
  m.SetInput({
      0, -6, 2, 4,   //
      3, -2, 10, 1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                 0, 0, 2, 4,   //
                                 3, 0, 10, 1,  //
                             }));
}
103
// RELU_N1_TO_1: clamps each value to the range [-1, 1]; values already inside
// the range pass through unchanged (-2.0 -> -1.0, 1.1 -> 1.0 below).
TEST(FloatActivationsOpTest, Relu1) {
  FloatActivationsOpModel m(BuiltinOperator_RELU_N1_TO_1,
                            /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}});
  m.SetInput({
      0.0, -0.6, 0.2, -0.4,  //
      0.3, -2.0, 1.1, -0.1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                 0.0, -0.6, 0.2, -0.4,  //
                                 0.3, -1.0, 1.0, -0.1,  //
                             }));
}
117
// RELU6: clamps each value to the range [0, 6] (-6 -> 0, 10 -> 6 below).
TEST(FloatActivationsOpTest, Relu6) {
  FloatActivationsOpModel m(BuiltinOperator_RELU6,
                            /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}});
  m.SetInput({
      0, -6, 2, 4,   //
      3, -2, 10, 1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                 0, 0, 2, 4,  //
                                 3, 0, 6, 1,  //
                             }));
}
131
// TANH: expected values are tanh(x) of each input, element-wise, checked
// with the default ArrayFloatNear tolerance.
TEST(FloatActivationsOpTest, Tanh) {
  FloatActivationsOpModel m(BuiltinOperator_TANH,
                            /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}});
  m.SetInput({
      0, -6, 2, 4,   //
      3, -2, 10, 1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
                                 0, -0.9999877, 0.9640275, 0.999329,    //
                                 0.99505475, -0.9640275, 1, 0.7615941,  //
                             })));
}
145
// LOGISTIC (sigmoid): expected values are 1 / (1 + e^-x) of each input.
TEST(FloatActivationsOpTest, Sigmoid) {
  FloatActivationsOpModel m(BuiltinOperator_LOGISTIC,
                            /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}});
  m.SetInput({
      0, -6, 2, 4,   //
      3, -2, 10, 1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
                                 0.5, 0.002473, 0.880797, 0.982014,       //
                                 0.952574, 0.119203, 0.999955, 0.731059,  //
                             })));
}
159
// Quantized LOGISTIC: input quantized over [-10, 10]. The dequantized output
// must match float sigmoid within kQuantizedTolerance, and the raw uint8
// values are the sigmoid results quantized with the fixed output scale 1/256
// (e.g. sigmoid(0) = 0.5 -> 0.5 * 256 = 128).
TEST(QuantizedActivationsOpTest, Sigmoid) {
  QuantizedActivationsOpModel m(
      BuiltinOperator_LOGISTIC,
      /*input=*/{TensorType_UINT8, {1, 2, 4, 1}, -10, 10});
  m.SetInput({
      0, -6, 2, 4,   //
      3, -2, 10, 1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {
                      0.5, 0.002473, 0.880797, 0.982014,       //
                      0.952574, 0.119203, 0.999955, 0.731059,  //
                  },
                  kQuantizedTolerance)));
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray({128, 1, 227, 251, 244, 32, 255, 188}));
}
179
// SOFTMAX on 4D inputs with beta = 0.1. Softmax is computed over the last
// (depth) dimension, so each depth row sums to 1. The second model reuses the
// same 8 values in a {4, 1, 1, 2} shape, which changes which elements are
// normalized together.
TEST(FloatActivationsOpTest, Softmax4D) {
  FloatActivationsOpModel m(0.1,
                            /*input=*/{TensorType_FLOAT32, {1, 2, 1, 4}});
  m.SetInput({
      0, -6, 2, 4,   // depth = 0
      3, -2, 10, 1,  // depth = 1
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
                                 .23463, .12877, .28658, .35003,  //
                                 .22528, .13664, .45365, .18443,  //
                             })));

  // Same input, but a different shape.
  FloatActivationsOpModel m2(0.1,
                             /*input=*/{TensorType_FLOAT32, {4, 1, 1, 2}});
  m2.SetInput({
      0, -6,  //
      2, 4,   //
      3, -2,  //
      10, 1,  //
  });
  m2.Invoke();
  EXPECT_THAT(m2.GetOutput(), ElementsAreArray(ArrayFloatNear({
                                  0.645656, 0.354344,  //
                                  0.450166, 0.549834,  //
                                  0.622459, 0.377541,  //
                                  0.710949, 0.28905,   //
                              })));
}
210
// Quantized SOFTMAX on 4D inputs with beta = 0.1, input range [-10, 10].
// Expected values mirror the float Softmax4D test; the dequantized output is
// checked within kQuantizedTolerance.
TEST(QuantizedActivationsOpTest, Softmax4D) {
  QuantizedActivationsOpModel m(
      0.1,
      /*input=*/{TensorType_UINT8, {1, 2, 1, 4}, -10, 10});
  m.SetInput({
      0, -6, 2, 4,   // depth = 0
      3, -2, 10, 1,  // depth = 1
  });
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {
                      .23463, .12877, .28658, .35003,  //
                      .22528, .13664, .45365, .18443,  //
                  },
                  kQuantizedTolerance)));

  // Same input, but a different shape.
  QuantizedActivationsOpModel m2(
      0.1,
      /*input=*/{TensorType_UINT8, {4, 1, 1, 2}, -10, 10});
  m2.SetInput({
      0, -6,  //
      2, 4,   //
      3, -2,  //
      10, 1,  //
  });
  m2.Invoke();
  EXPECT_THAT(m2.GetDequantizedOutput(), ElementsAreArray(ArrayFloatNear(
                                             {
                                                 0.645656, 0.354344,  //
                                                 0.450166, 0.549834,  //
                                                 0.622459, 0.377541,  //
                                                 0.710949, 0.28905,   //
                                             },
                                             kQuantizedTolerance)));
}
248
// SOFTMAX on 2D inputs with beta = 0.1, computed over the last dimension.
// Uses the same 8 values as the 4D test in {2, 4} and {4, 2} shapes, so the
// expected outputs match the corresponding 4D cases row-for-row.
TEST(FloatActivationsOpTest, Softmax2D) {
  FloatActivationsOpModel m(0.1,
                            /*input=*/{TensorType_FLOAT32, {2, 4}});
  m.SetInput({
      0, -6, 2, 4,   //
      3, -2, 10, 1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
                                 .23463, .12877, .28658, .35003,  //
                                 .22528, .13664, .45365, .18443,  //
                             })));

  // Same input, but a different shape.
  FloatActivationsOpModel m2(0.1,
                             /*input=*/{TensorType_FLOAT32, {4, 2}});
  m2.SetInput({
      0, -6,  //
      2, 4,   //
      3, -2,  //
      10, 1,  //
  });
  m2.Invoke();
  EXPECT_THAT(m2.GetOutput(), ElementsAreArray(ArrayFloatNear({
                                  0.645656, 0.354344,  //
                                  0.450166, 0.549834,  //
                                  0.622459, 0.377541,  //
                                  0.710949, 0.28905,   //
                              })));
}
279
// Quantized SOFTMAX on 2D inputs with beta = 0.1, input range [-10, 10].
// Expected values mirror the float Softmax2D test; the dequantized output is
// checked within kQuantizedTolerance.
TEST(QuantizedActivationsOpTest, Softmax2D) {
  QuantizedActivationsOpModel m(0.1,
                                /*input=*/{TensorType_UINT8, {2, 4}, -10, 10});
  m.SetInput({
      0, -6, 2, 4,   //
      3, -2, 10, 1,  //
  });
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {
                      .23463, .12877, .28658, .35003,  //
                      .22528, .13664, .45365, .18443,  //
                  },
                  kQuantizedTolerance)));

  // Same input, but a different shape.
  QuantizedActivationsOpModel m2(0.1,
                                 /*input=*/{TensorType_UINT8, {4, 2}, -10, 10});
  m2.SetInput({
      0, -6,  //
      2, 4,   //
      3, -2,  //
      10, 1,  //
  });
  m2.Invoke();
  EXPECT_THAT(m2.GetDequantizedOutput(), ElementsAreArray(ArrayFloatNear(
                                             {
                                                 0.645656, 0.354344,  //
                                                 0.450166, 0.549834,  //
                                                 0.622459, 0.377541,  //
                                                 0.710949, 0.28905,   //
                                             },
                                             kQuantizedTolerance)));
}
315
316}  // namespace
317}  // namespace tflite
318
// Test entry point: routes TF Lite logging to stderr, then runs all
// registered gtest cases.
int main(int argc, char** argv) {
  ::tflite::LogToStderr();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
324