1/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7    http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
14==============================================================================*/
15
16#include "tensorflow/core/framework/tensor.h"
17
18#include "tensorflow/core/framework/tensor.pb.h"
19#include "tensorflow/core/framework/tensor_testutil.h"
20#include "tensorflow/core/framework/types.h"
21#include "tensorflow/core/framework/variant_encode_decode.h"
22#include "tensorflow/core/framework/variant_tensor_data.h"
23#include "tensorflow/core/lib/math/math_util.h"
24#include "tensorflow/core/lib/strings/strcat.h"
25#include "tensorflow/core/platform/logging.h"
26#include "tensorflow/core/platform/test.h"
27#include "tensorflow/core/platform/test_benchmark.h"
28
29namespace tensorflow {
30
// Test-only friend of Tensor: exposes the private set_shape() so tests can
// change a tensor's shape in place.
class TensorTestHelper {
 public:
  // This is an operation that can be done by VariableOp.
  static void set_shape(Tensor* t, const TensorShape& s) { t->set_shape(s); }
};
36
37// To make TestCopies do the right thing.
38bool operator==(const ResourceHandle& a, const ResourceHandle& b) {
39  return a.device() == b.device() && a.container() == b.container() &&
40         a.name() == b.name() && a.hash_code() == b.hash_code() &&
41         a.maybe_type_name() == b.maybe_type_name();
42}
43
44bool operator==(const Variant& a, const Variant& b) {
45  if (a.is_empty()) {
46    return b.is_empty();
47  }
48
49  if (a.TypeId() != b.TypeId()) return false;
50  if (a.TypeName() != b.TypeName()) return false;
51
52  VariantTensorData a_data, b_data;
53  a.Encode(&a_data);
54  b.Encode(&b_data);
55
56  string a_metadata;
57  string b_metadata;
58  a_data.get_metadata(&a_metadata);
59  b_data.get_metadata(&b_metadata);
60  if (a_metadata != b_metadata) return false;
61
62  if (a_data.tensors_size() != b_data.tensors_size()) return false;
63
64  for (int i = 0; i < a_data.tensors_size(); ++i) {
65    TensorProto a_proto, b_proto;
66    a_data.tensors(i).AsProtoTensorContent(&a_proto);
67    b_data.tensors(i).AsProtoTensorContent(&b_proto);
68    string a_str, b_str;
69    a_proto.SerializeToString(&a_str);
70    b_proto.SerializeToString(&b_str);
71    if (a_str != b_str) return false;
72  }
73
74  return true;
75}
76
77namespace {
78
// A default-constructed Tensor is a rank-1 DT_FLOAT tensor with 0 elements.
TEST(TensorTest, Default) {
  Tensor t;
  EXPECT_EQ(t.dtype(), DT_FLOAT);
  EXPECT_EQ(t.dims(), 1);
  EXPECT_EQ(t.NumElements(), 0);
}
85
// Sanity-checks std::is_trivial for the element types Tensor supports.
TEST(TensorTest, DataType_Traits) {
  EXPECT_TRUE(std::is_trivial<float>::value);
  EXPECT_TRUE(std::is_trivial<double>::value);
  EXPECT_TRUE(std::is_trivial<int32>::value);
  EXPECT_TRUE(std::is_trivial<uint8>::value);
  EXPECT_TRUE(std::is_trivial<uint16>::value);
  EXPECT_TRUE(std::is_trivial<int16>::value);
  EXPECT_TRUE(std::is_trivial<int8>::value);
  EXPECT_TRUE(std::is_trivial<int64>::value);
  EXPECT_TRUE(std::is_trivial<bool>::value);
  EXPECT_FALSE(std::is_trivial<string>::value);

  EXPECT_EQ(sizeof(bool), 1);

  // Unfortunately, std::complex::complex() initializes (0, 0), so the
  // standard complex types are not trivial...
  EXPECT_FALSE(std::is_trivial<complex64>::value);
  EXPECT_FALSE(std::is_trivial<complex128>::value);
  // ...but plain two-element arrays and layout-compatible POD structs are.
  EXPECT_TRUE(std::is_trivial<float[2]>::value);
  EXPECT_TRUE(std::is_trivial<double[2]>::value);
  struct MyComplex64 {
    float re, im;
  };
  EXPECT_TRUE(std::is_trivial<MyComplex64>::value);
  struct MyComplex128 {
    double re, im;
  };
  EXPECT_TRUE(std::is_trivial<MyComplex128>::value);
}
114
115template <typename T>
116void TestCopies(const Tensor& t) {
117  {
118    LOG(INFO) << "CopyFrom()";
119    Tensor t2(t.dtype());
120    EXPECT_TRUE(t2.CopyFrom(t, t.shape()));
121    test::ExpectTensorEqual<T>(t, t2);
122  }
123  {
124    LOG(INFO) << "operator=()";
125    Tensor t2(t.dtype());
126    t2 = t;
127    test::ExpectTensorEqual<T>(t, t2);
128  }
129  {
130    LOG(INFO) << "deep copy";
131    Tensor t2(t.dtype(), t.shape());
132    t2.flat<T>() = t.flat<T>();
133    test::ExpectTensorEqual<T>(t, t2);
134  }
135  {
136    LOG(INFO) << "AsProtoField()";
137    TensorProto proto;
138    t.AsProtoField(&proto);
139    Tensor t2(t.dtype());
140    EXPECT_TRUE(t2.FromProto(proto));
141    test::ExpectTensorEqual<T>(t, t2);
142  }
143  {
144    LOG(INFO) << "AsProtoTensorContent()";
145    TensorProto proto;
146    t.AsProtoTensorContent(&proto);
147    Tensor t2(t.dtype());
148    EXPECT_TRUE(t2.FromProto(proto));
149    test::ExpectTensorEqual<T>(t, t2);
150    // Make another copy via tensor_content field.
151    *proto.mutable_tensor_content() = proto.tensor_content();
152    Tensor t3(t.dtype());
153    EXPECT_TRUE(t3.FromProto(proto));
154    test::ExpectTensorEqual<T>(t, t2);
155  }
156  {
157    LOG(INFO) << "AsTensor";
158    gtl::ArraySlice<T> values(t.flat<T>().data(), t.NumElements());
159    Tensor t2 = test::AsTensor(values, t.shape());
160    test::ExpectTensorEqual<T>(t, t2);
161  }
162  {
163    LOG(INFO) << "Move constructor";
164    Tensor t2 = t;
165    Tensor t3(std::move(t2));
166    test::ExpectTensorEqual<T>(t, t3);
167    EXPECT_TRUE(t3.IsInitialized());
168    EXPECT_FALSE(t2.IsInitialized());
169  }
170  {
171    LOG(INFO) << "Move assignment";
172    Tensor t2 = t;
173    Tensor t3 = std::move(t2);
174    Tensor* t4 = &t3;
175    *t4 = std::move(t3);
176    test::ExpectTensorEqual<T>(t, t3);
177    EXPECT_TRUE(t3.IsInitialized());
178    EXPECT_FALSE(t2.IsInitialized());
179  }
180}
181
182TEST(Tensor_Half, Simple) {
183  Tensor t(DT_HALF, TensorShape({5, 7}));
184  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({5, 7})));
185  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
186    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
187      t.matrix<Eigen::half>()(a, b) = static_cast<Eigen::half>(a * b);
188    }
189  }
190  TestCopies<Eigen::half>(t);
191}
192
193TEST(Tensor_Bfloat16, Simple) {
194  Tensor t(DT_BFLOAT16, TensorShape({5, 7}));
195  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({5, 7})));
196  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
197    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
198      t.matrix<bfloat16>()(a, b) = static_cast<bfloat16>(a * b);
199    }
200  }
201  TestCopies<bfloat16>(t);
202}
203
204TEST(Tensor_Float, Simple) {
205  Tensor t(DT_FLOAT, TensorShape({10, 20}));
206  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({10, 20})));
207  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
208    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
209      t.matrix<float>()(a, b) = static_cast<float>(a * b);
210    }
211  }
212  TestCopies<float>(t);
213}
214
215TEST(Tensor_ResourceHandle, Simple) {
216  Tensor t(DT_RESOURCE, TensorShape({}));
217  ResourceHandle tmp;
218  tmp.set_name("a");
219  t.flat<ResourceHandle>()(0) = tmp;
220  TestCopies<ResourceHandle>(t);
221}
222
// Copy/move semantics of a scalar DT_VARIANT tensor wrapping a float tensor.
TEST(Tensor_Variant, Simple) {
  Tensor t(DT_VARIANT, TensorShape({}));
  Tensor value(DT_FLOAT, TensorShape({}));
  value.flat<float>()(0) = 42.0f;
  t.flat<Variant>()(0) = value;
  // All the tests in TestCopies except the ones that serialize and deserialize
  // the tensor. The consumer of a serialized Variant Tensor should know what
  // type is stored in the Tensor, so not testing the generic
  // serialize/deserialize case here.
  {
    LOG(INFO) << "CopyFrom()";
    Tensor t2(t.dtype());
    EXPECT_TRUE(t2.CopyFrom(t, t.shape()));
    test::ExpectTensorEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "operator=()";
    Tensor t2(t.dtype());
    t2 = t;
    test::ExpectTensorEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "deep copy";
    Tensor t2(t.dtype(), t.shape());
    t2.flat<Variant>() = t.flat<Variant>();
    test::ExpectTensorEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "AsTensor";
    gtl::ArraySlice<Variant> values(t.flat<Variant>().data(), t.NumElements());
    Tensor t2 = test::AsTensor(values, t.shape());
    test::ExpectTensorEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "Move constructor";
    Tensor t2 = t;
    Tensor t3(std::move(t2));
    test::ExpectTensorEqual<Variant>(t, t3);
    EXPECT_TRUE(t3.IsInitialized());
    // Moved-from tensor must be left uninitialized.
    EXPECT_FALSE(t2.IsInitialized());
  }
  {
    LOG(INFO) << "Move assignment";
    Tensor t2 = t;
    Tensor t3 = std::move(t2);
    // Self-move (via pointer, to avoid compiler warnings) must be a no-op.
    Tensor* t4 = &t3;
    *t4 = std::move(t3);
    test::ExpectTensorEqual<Variant>(t, t3);
    EXPECT_TRUE(t3.IsInitialized());
    EXPECT_FALSE(t2.IsInitialized());
  }
}
275
276TEST(Tensor_Variant, Marshal) {
277  Tensor t(DT_VARIANT, TensorShape({}));
278
279  Tensor internal(DT_FLOAT, TensorShape({}));
280  internal.flat<float>()(0) = 42.0f;
281  t.flat<Variant>()(0) = internal;
282
283  LOG(INFO) << "AsProtoField()";
284  TensorProto proto;
285  t.AsProtoField(&proto);
286
287  // This performs a decode operation.
288  Tensor t2(t.dtype());
289  EXPECT_TRUE(t2.FromProto(proto));
290
291  Tensor* out = t2.flat<Variant>()(0).get<Tensor>();
292  EXPECT_NE(out, nullptr);
293  EXPECT_FLOAT_EQ(out->scalar<float>()(), 42.0f);
294}
295
296TEST(Tensor_UInt16, Simple) {
297  Tensor t(DT_UINT16, TensorShape({2, 2}));
298  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
299  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
300    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
301      t.matrix<uint16>()(a, b) = uint16(a * b);
302    }
303  }
304  TestCopies<uint16>(t);
305}
306
307TEST(Tensor_QInt8, Simple) {
308  Tensor t(DT_QINT8, TensorShape({2, 2}));
309  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
310  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
311    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
312      t.matrix<qint8>()(a, b) = qint8(a * b);
313    }
314  }
315  TestCopies<qint8>(t);
316}
317
318TEST(Tensor_QUInt8, Simple) {
319  Tensor t(DT_QUINT8, TensorShape({2, 2}));
320  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
321  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
322    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
323      t.matrix<Eigen::QUInt8>()(a, b) = Eigen::QUInt8(a * b);
324    }
325  }
326  TestCopies<Eigen::QUInt8>(t);
327}
328
329TEST(Tensor_QInt32, Simple) {
330  Tensor t(DT_QINT32, TensorShape({2, 2}));
331  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
332  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
333    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
334      t.matrix<qint32>()(a, b) = qint32(static_cast<int32>(a * b));
335    }
336  }
337  TestCopies<qint32>(t);
338}
339
// Fixture providing a 2x3x4x5 float tensor `t` whose first element is 0.01f
// and last element is 0.02f (set in SetUp), plus `zero_t` with zero-sized
// dimensions. Helpers reshape `t` through an arbitrary Tensor member function
// and validate the resulting view.
class TensorReshapeTest : public ::testing::Test {
 protected:
  Tensor t;
  Tensor zero_t;

  TensorReshapeTest()
      : t(DT_FLOAT, TensorShape({2, 3, 4, 5})),
        zero_t(DT_FLOAT, TensorShape({3, 0, 2, 0, 5})) {}

  void SetUp() override {
    EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 3, 4, 5})));
    EXPECT_TRUE(zero_t.shape().IsSameSize(TensorShape({3, 0, 2, 0, 5})));

    auto tensor = t.tensor<float, 4>();
    EXPECT_EQ(2, tensor.dimension(0));
    EXPECT_EQ(3, tensor.dimension(1));
    EXPECT_EQ(4, tensor.dimension(2));
    EXPECT_EQ(5, tensor.dimension(3));

    // Set first and last elements.
    tensor(0, 0, 0, 0) = 0.01f;
    tensor(1, 2, 3, 4) = 0.02f;
  }

  // Pointer-to-member types for the non-const and const reshape accessors,
  // e.g. &Tensor::shaped<float, N>.
  template <typename T>
  using ReshapeFunc = T (Tensor::*)(gtl::ArraySlice<int64>);
  template <typename T>
  using ConstReshapeFunc = T (Tensor::*)(gtl::ArraySlice<int64>) const;

  // Reshapes `t` via Func (non-const overload) and validates the view.
  template <typename T, ReshapeFunc<T> Func>
  void TestReshape(std::initializer_list<int64> sizes) {
    T shaped = (t.*Func)(sizes);
    TestReshapeImpl(shaped, sizes);
  }

  // Same, but invokes a const member function on `t`.
  template <typename T, ConstReshapeFunc<T> Func>
  void TestReshape(std::initializer_list<int64> sizes) {
    T shaped = (static_cast<const Tensor&>(t).*Func)(sizes);
    TestReshapeImpl(shaped, sizes);
  }

  // Checks that `shaped` has the requested dimensions and that its first and
  // last elements match the values written in SetUp, accounting for views
  // that bit-cast the float buffer to another scalar type.
  template <typename T>
  void TestReshapeImpl(T shaped, std::initializer_list<int64> sizes) {
    auto iter = sizes.begin();
    for (int i = 0; i < shaped.rank(); ++i, ++iter) {
      EXPECT_EQ(*iter, shaped.dimension(i));
    }

    using Index = typename T::Index;
    using Scalar = typename T::Scalar;
    constexpr int N = T::NumIndices;

    // To handle the cast when `shaped` is bit casted into a different type.
    const float expected_first = 0.01f;
    Eigen::DSizes<Index, N> coord;  // all-zero indices: the first element
    EXPECT_EQ(shaped(coord), *reinterpret_cast<const Scalar*>(&expected_first));

    // Point `coord` at the last element of the view.
    for (int i = 0; i < N; ++i) {
      coord[i] = shaped.dimension(i) - 1;
    }
    const float expected_last = 0.02f;
    constexpr int kNumScalarPerFloat =
        sizeof(float) / sizeof(Scalar);  // Assuming even divide.
    // For a smaller Scalar, the last view element is the last sub-scalar of
    // the last float.
    EXPECT_EQ(shaped(coord), reinterpret_cast<const Scalar*>(
                                 &expected_last)[kNumScalarPerFloat - 1]);
  }
};
407
// Runs TestReshape over every shaped-accessor flavor -- mutable/const,
// aligned/unaligned, and bit-cast to float or int32 -- for several
// equivalent shapes of the 120-element tensor.
TEST_F(TensorReshapeTest, Reshape) {
  LOG(INFO) << "shaped";

// N (the rank) is derived from the number of variadic arguments.
#define TEST_RESHAPE(...)                                                  \
  {                                                                        \
    constexpr int N = (sizeof((int[]){__VA_ARGS__}) / sizeof(int));        \
    TestReshape<TTypes<float, N>::Tensor, &Tensor::shaped<float, N>>(      \
        {__VA_ARGS__});                                                    \
    TestReshape<TTypes<float, N>::ConstTensor, &Tensor::shaped<float, N>>( \
        {__VA_ARGS__});                                                    \
    TestReshape<TTypes<float, N>::UnalignedTensor,                         \
                &Tensor::unaligned_shaped<float, N>>({__VA_ARGS__});       \
    TestReshape<TTypes<float, N>::UnalignedConstTensor,                    \
                &Tensor::unaligned_shaped<float, N>>({__VA_ARGS__});       \
    TestReshape<TTypes<float, N>::Tensor,                                  \
                &Tensor::bit_casted_shaped<float, N>>({__VA_ARGS__});      \
    TestReshape<TTypes<float, N>::ConstTensor,                             \
                &Tensor::bit_casted_shaped<float, N>>({__VA_ARGS__});      \
    TestReshape<TTypes<int32, N>::Tensor,                                  \
                &Tensor::bit_casted_shaped<int32, N>>({__VA_ARGS__});      \
    TestReshape<TTypes<int32, N>::ConstTensor,                             \
                &Tensor::bit_casted_shaped<int32, N>>({__VA_ARGS__});      \
  }

  // Each shape below holds exactly 120 elements (== 2*3*4*5).
  TEST_RESHAPE(120);
  TEST_RESHAPE(6, 20);
  TEST_RESHAPE(6, 4, 5);
  TEST_RESHAPE(2, 3, 4, 5);
#undef TEST_RESHAPE
}
438
// bit_casted_shaped may reinterpret the float buffer as a smaller element
// type, multiplying the element count: uint8 gives 4x (480 elements), int16
// gives 2x (240 elements).
TEST_F(TensorReshapeTest, BitcastReshapeDifferentSize) {
#define TEST_BITCAST8_RESHAPE(...)                                    \
  {                                                                   \
    constexpr int N = (sizeof((int[]){__VA_ARGS__}) / sizeof(int));   \
    TestReshape<TTypes<uint8, N>::Tensor,                             \
                &Tensor::bit_casted_shaped<uint8, N>>({__VA_ARGS__}); \
  }

  TEST_BITCAST8_RESHAPE(480);
  TEST_BITCAST8_RESHAPE(24, 20);
  TEST_BITCAST8_RESHAPE(6, 16, 5);
  TEST_BITCAST8_RESHAPE(2, 3, 4, 20);
#undef TEST_BITCAST8_RESHAPE
#define TEST_BITCAST16_RESHAPE(...)                                   \
  {                                                                   \
    constexpr int N = (sizeof((int[]){__VA_ARGS__}) / sizeof(int));   \
    TestReshape<TTypes<int16, N>::Tensor,                             \
                &Tensor::bit_casted_shaped<int16, N>>({__VA_ARGS__}); \
  }

  TEST_BITCAST16_RESHAPE(240);
  TEST_BITCAST16_RESHAPE(6, 40);
  TEST_BITCAST16_RESHAPE(12, 4, 5);
  TEST_BITCAST16_RESHAPE(2, 3, 8, 5);
  TEST_BITCAST16_RESHAPE(2, 3, 4, 1, 10);
#undef TEST_BITCAST16_RESHAPE
}
466
// Element-count mismatches must CHECK-fail. For bit_casted_shaped the counts
// in the error are in units of the *target* type (bytes for the 480-byte
// float buffer).
TEST_F(TensorReshapeTest, ReshapeError) {
  EXPECT_DEATH((t.shaped<float, 0>({})), "1 vs. 120");
  EXPECT_DEATH((t.shaped<float, 1>({119})), "119 vs. 120");
  EXPECT_DEATH((t.shaped<float, 4>({2, 3, 4, 6})), "144 vs. 120");

  EXPECT_DEATH((t.unaligned_shaped<float, 0>({})), "1 vs. 120");
  EXPECT_DEATH((t.unaligned_shaped<float, 1>({119})), "119 vs. 120");
  EXPECT_DEATH((t.unaligned_shaped<float, 4>({2, 3, 4, 6})), "144 vs. 120");

  EXPECT_DEATH((t.bit_casted_shaped<float, 0>({})), "4 vs. 480");
  EXPECT_DEATH((t.bit_casted_shaped<float, 1>({119})), "476 vs. 480");
  EXPECT_DEATH((t.bit_casted_shaped<float, 4>({2, 3, 4, 6})), "576 vs. 480");

  Tensor string_tensor{DT_STRING, {10}};
  // Note that the error message compare # of elements, not # of bytes.
  EXPECT_DEATH((string_tensor.bit_casted_shaped<string, 1>({9})), "9 vs. 10");
}
484
485TEST_F(TensorReshapeTest, Flat) {
486  LOG(INFO) << "flat";
487  {
488    auto flat = t.flat<float>();
489    EXPECT_EQ(flat(0), 0.01f);
490    EXPECT_EQ(120, flat.dimension(0));
491    EXPECT_EQ(flat(0), 0.01f);
492    EXPECT_EQ(flat(119), 0.02f);
493  }
494}
495
// flat_inner_dims<T, NDIMS>() keeps the last NDIMS-1 dimensions and collapses
// everything before them into dimension 0; if the tensor has fewer than NDIMS
// dims, leading 1s are prepended.
TEST_F(TensorReshapeTest, FlatInnerDims) {
  LOG(INFO) << "flat_inner_dims";
  {
    // Default NDIMS == 2: {2,3,4,5} -> {24, 5}.
    auto flat_inner_dims = t.flat_inner_dims<float>();
    EXPECT_EQ(24, flat_inner_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_dims.dimension(1));
    EXPECT_EQ(flat_inner_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(23, 4), 0.02f);
  }
  {
    // {2,3,4,5} -> {6, 4, 5}.
    auto flat_inner_dims = t.flat_inner_dims<float, 3>();
    EXPECT_EQ(6, flat_inner_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_dims.dimension(2));
    EXPECT_EQ(flat_inner_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(5, 3, 4), 0.02f);
  }
  {
    // NDIMS larger than the rank pads with leading 1s: {1, 2, 3, 4, 5}.
    auto flat_inner_dims = t.flat_inner_dims<float, 5>();
    EXPECT_EQ(1, flat_inner_dims.dimension(0));
    EXPECT_EQ(2, flat_inner_dims.dimension(1));
    EXPECT_EQ(3, flat_inner_dims.dimension(2));
    EXPECT_EQ(4, flat_inner_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_dims.dimension(4));
    EXPECT_EQ(flat_inner_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(0, 1, 2, 3, 4), 0.02f);
  }
  {
    // Zero-sized dims collapse to a zero-sized leading dim: {3,0,2,0,5} -> {0, 5}.
    auto flat_inner_dims = zero_t.flat_inner_dims<float>();
    EXPECT_EQ(0, flat_inner_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_dims.dimension(1));
  }
  {
    auto flat_inner_dims = zero_t.flat_inner_dims<float, 3>();
    EXPECT_EQ(0, flat_inner_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_dims.dimension(2));
  }
  {
    // NDIMS equal to rank: shape is unchanged.
    auto flat_inner_dims = zero_t.flat_inner_dims<float, 5>();
    EXPECT_EQ(3, flat_inner_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_dims.dimension(2));
    EXPECT_EQ(0, flat_inner_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_dims.dimension(4));
  }
}
543
// flat_outer_dims<T, NDIMS>() keeps the first NDIMS-1 dimensions and collapses
// everything after them into the last dimension; if the tensor has fewer than
// NDIMS dims, trailing 1s are appended.
TEST_F(TensorReshapeTest, FlatOuterDims) {
  LOG(INFO) << "flat_outer_dims";
  {
    // Default NDIMS == 2: {2,3,4,5} -> {2, 60}.
    auto flat_outer_dims = t.flat_outer_dims<float>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(60, flat_outer_dims.dimension(1));
    EXPECT_EQ(flat_outer_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 59), 0.02f);
  }
  {
    // {2,3,4,5} -> {2, 3, 20}.
    auto flat_outer_dims = t.flat_outer_dims<float, 3>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_outer_dims.dimension(1));
    EXPECT_EQ(20, flat_outer_dims.dimension(2));
    EXPECT_EQ(flat_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 2, 19), 0.02f);
  }
  {
    // NDIMS larger than the rank pads with trailing 1s: {2, 3, 4, 5, 1}.
    auto flat_outer_dims = t.flat_outer_dims<float, 5>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_outer_dims.dimension(4));
    EXPECT_EQ(flat_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 2, 3, 4, 0), 0.02f);
  }
  {
    // Zero-sized dims collapse into a zero-sized trailing dim.
    auto flat_outer_dims = zero_t.flat_outer_dims<float>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
  }
  {
    auto flat_outer_dims = zero_t.flat_outer_dims<float, 3>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_outer_dims.dimension(2));
  }
  {
    // NDIMS equal to rank: shape is unchanged.
    auto flat_outer_dims = zero_t.flat_outer_dims<float, 5>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_outer_dims.dimension(2));
    EXPECT_EQ(0, flat_outer_dims.dimension(3));
    EXPECT_EQ(5, flat_outer_dims.dimension(4));
  }
}
591
// flat_inner_outer_dims<T, NDIMS>(begin) keeps NDIMS dimensions starting at
// index `begin` (negative begin pads with leading 1s; overruns past the rank
// pad with trailing 1s), collapsing any dims outside that window into the
// first/last kept dimension.
TEST_F(TensorReshapeTest, FlatInnerOuterDims) {
  LOG(INFO) << "flat_inner_outer_dims";
  {
    // Window covers the whole rank exactly: shape unchanged.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 4>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 3, 4), 0.02f);
  }
  {
    // begin == -2 pads two leading 1s: {1, 1, 2, 3, 4, 5}.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 6>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 3, 4), 0.02f);
  }
  {
    // Window extends past the rank: trailing 1s appended.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 6>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 3, 4, 0, 0), 0.02f);
  }
  {
    // Padding on both sides: {1, 1, 2, 3, 4, 5, 1, 1}.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 8>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(6));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(7));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 3, 4, 0, 0), 0.02f);
  }
  {
    // Leading dims collapse into the first kept dim: {6, 4, 5}.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 3>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 3, 4), 0.02f);
  }
  {
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 5>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 3, 4, 0, 0), 0.02f);
  }
  {
    // Trailing dims collapse into the last kept dim: {2, 3, 20}.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 3>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 19), 0.02f);
  }
  {
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 5>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 19), 0.02f);
  }
  {
    // Collapse on both sides: {6, 20}.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 2>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(flat_inner_outer_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 19), 0.02f);
  }
  // The remaining cases exercise zero-sized dimensions in zero_t {3,0,2,0,5}.
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 2>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(2));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 5>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(4));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 2>(3);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(1));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(2);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(1);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(2));
  }
}
718
// reinterpret_last_dimension<int32, 4>() folds the size-4 qint8 inner
// dimension of an NCHW_VECT_C tensor into a single int32 per (n,c,h,w),
// yielding an NCHW view. The expected values are built by packing the same
// four bytes into an int32 via reinterpret_cast.
TEST(ReinterpretLastDimension, Reinterpret_NCHW_VECT_C_as_NCHW) {
  LOG(INFO) << "reinterpret_last_dimension";
  {
    Tensor t_nchw_vect_c(DT_QINT8, TensorShape({2, 3, 5, 7, 4}));
    auto nchw_vect_c = t_nchw_vect_c.tensor<qint8, 5>();
    Tensor t_expected_nchw(DT_INT32, TensorShape({2, 3, 5, 7}));
    auto expected_nchw = t_expected_nchw.tensor<int32, 4>();
    int8 val = 0;
    for (int n = 0; n < t_nchw_vect_c.shape().dim_size(0); ++n) {
      for (int c = 0; c < t_nchw_vect_c.shape().dim_size(1); ++c) {
        // ++val per row as well, so consecutive rows get distinct sequences.
        for (int h = 0; h < t_nchw_vect_c.shape().dim_size(2); ++h, ++val) {
          int8 packet[4];
          for (int w = 0; w < t_nchw_vect_c.shape().dim_size(3); ++w) {
            // Write the four channel bytes and capture them in `packet`,
            // then view the packet as one int32 (native byte order).
            packet[0] = nchw_vect_c(n, c, h, w, 0) = ++val;
            packet[1] = nchw_vect_c(n, c, h, w, 1) = ++val;
            packet[2] = nchw_vect_c(n, c, h, w, 2) = ++val;
            packet[3] = nchw_vect_c(n, c, h, w, 3) = ++val;
            expected_nchw(n, c, h, w) = *reinterpret_cast<int32*>(&packet[0]);
          }
        }
      }
    }
    // Exercise both the non-const and const accessors.
    auto actual_nchw = t_nchw_vect_c.reinterpret_last_dimension<int32, 4>();
    const auto& const_t_nchw_vect_c = t_nchw_vect_c;
    auto const_actual_nchw =
        const_t_nchw_vect_c.reinterpret_last_dimension<int32, 4>();
    for (int n = 0; n < t_nchw_vect_c.shape().dim_size(0); ++n) {
      for (int c = 0; c < t_nchw_vect_c.shape().dim_size(1); ++c) {
        for (int h = 0; h < t_nchw_vect_c.shape().dim_size(2); ++h) {
          for (int w = 0; w < t_nchw_vect_c.shape().dim_size(3); ++w) {
            EXPECT_EQ(expected_nchw(n, c, h, w), actual_nchw(n, c, h, w));
            EXPECT_EQ(expected_nchw(n, c, h, w), const_actual_nchw(n, c, h, w));
          }
        }
      }
    }
  }
}
757
// scalar<T>(), vec<T>() and flat<T>() accessors on one-element (and
// zero-element) tensors of various shapes and dtypes. scalar<T>() works on
// any shape with exactly one element, e.g. {1,1,1}.
TEST(Tensor_Scalar, Basics) {
  {
    // bool scalar, rank 0.
    Tensor t(DT_BOOL, TensorShape({}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<bool>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.scalar<bool>()() = true;
    EXPECT_TRUE(Tt());
  }
  {
    // float scalar, rank 0.
    Tensor t(DT_FLOAT, TensorShape({}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<float>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.scalar<float>()() = 123.45f;
    EXPECT_FLOAT_EQ(123.45f, Tt());
  }
  {
    // One-element vector accessed via vec<float>().
    Tensor t(DT_FLOAT, TensorShape({1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.vec<float>();
    EXPECT_EQ(1, Tt.size());
    t.vec<float>()(0) = 123.45f;
    EXPECT_FLOAT_EQ(123.45f, Tt(0));
  }
  {
    // scalar<T>() on a {1,1,1} tensor: still one element, rank-0 view.
    Tensor t(DT_FLOAT, TensorShape({1, 1, 1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<float>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.flat<float>()(0) = 123.45f;
    EXPECT_FLOAT_EQ(123.45f, Tt());
  }
  {
    // string scalar, rank 0.
    Tensor t(DT_STRING, TensorShape({}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<string>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.scalar<string>()() = "foo";
    EXPECT_EQ("foo", Tt());
  }
  {
    // One-element string vector.
    Tensor t(DT_STRING, TensorShape({1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.vec<string>();
    EXPECT_EQ(1, Tt.size());
    t.flat<string>()(0) = "foo";
    EXPECT_EQ("foo", Tt(0));
  }
  {
    // scalar<string>() on a {1,1,1} tensor.
    Tensor t(DT_STRING, TensorShape({1, 1, 1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<string>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.flat<string>()(0) = "bar";
    EXPECT_EQ("bar", Tt());
  }
  {
    // Zero-element tensor: flat() is empty, matrix() keeps the {0, 1} dims.
    Tensor t(DT_FLOAT, TensorShape({0, 1}));
    EXPECT_EQ(0, t.NumElements());
    auto Tt = t.flat<float>();
    EXPECT_EQ(0, Tt.size());
    auto Tm = t.matrix<float>();
    EXPECT_EQ(0, Tm.size());
    EXPECT_EQ(0, Tm.dimensions()[0]);
    EXPECT_EQ(1, Tm.dimensions()[1]);
  }
}
831
// Assigning through a reshaped view writes into the underlying buffer, so the
// writes are visible through the original rank-4 accessor.
TEST(Tensor_Float, Reshape_And_Slice_Assignment) {
  // A test to experiment with a way to assign to a subset of a tensor
  Tensor t(DT_FLOAT, TensorShape({10, 4, 3, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({10, 4, 3, 2})));

  // Get the N dimensional tensor (N==4 here)
  auto e_t = t.tensor<float, 4>();
  // Reshape to view it as a two-dimensional tensor
  auto e_2d = t.shaped<float, 2>({10, 4 * 3 * 2});
  for (int i = 0; i < 10; i++) {
    // Assign a 1 x 4*3*2 matrix (really vector) to a slice of size
    // 1 x 4*3*2 in e_t.
    Eigen::Tensor<float, 2, Eigen::RowMajor> m(1, 4 * 3 * 2);
    m.setConstant(i * 2.0);

    Eigen::DSizes<Eigen::DenseIndex, 2> indices(i, 0);
    Eigen::DSizes<Eigen::DenseIndex, 2> sizes(1, 4 * 3 * 2);
    e_2d.slice(indices, sizes) = m;
  }
  // Every element of row i (in any of the three inner dims) must be i*2.
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 4; j++) {
      for (int k = 0; k < 3; k++) {
        for (int l = 0; l < 2; l++) {
          EXPECT_EQ(e_t(i, j, k, l), i * 2.0f);
          LOG(INFO) << i << "," << j << "," << k << "," << l
                    << " &e_t(i, j, k, l): " << &e_t(i, j, k, l) << " = "
                    << e_t(i, j, k, l);
        }
      }
    }
  }
}
864
865TEST(Tensor_String, Simple) {
866  Tensor t = test::AsTensor<string>(
867      {"hello", "world", "machine", "learning", "new", "york"},
868      TensorShape({3, 2}));
869  auto s = t.shape();
870  ASSERT_EQ(s.dims(), 2);
871  ASSERT_EQ(s.dim_size(0), 3);
872  ASSERT_EQ(s.dim_size(1), 2);
873  auto m = t.matrix<string>();
874  EXPECT_EQ(t.TotalBytes(), 3 * 2 * sizeof(string) + 5 + 5 + 7 + 8 + 3 + 4);
875
876  EXPECT_EQ(m(0, 0), "hello");
877  EXPECT_EQ(m(0, 1), "world");
878  EXPECT_EQ(m(1, 0), "machine");
879  EXPECT_EQ(m(1, 1), "learning");
880  EXPECT_EQ(m(2, 0), "new");
881  EXPECT_EQ(m(2, 1), "york");
882
883  TestCopies<string>(t);
884}
885
886TEST(Tensor_Float, SimpleWithHelper) {
887  Tensor t1 = test::AsTensor<float>({0, 1, 2, 3, 4, 5}, {2, 3});
888  Tensor t2(t1.dtype(), t1.shape());
889  t2.flat<float>() = t1.flat<float>() * 2.0f;
890  Tensor t3 = test::AsTensor<float>({0, 2, 4, 6, 8, 10}, t1.shape());
891  test::ExpectTensorEqual<float>(t2, t3);
892}
893
894TEST(Tensor_Int32, SimpleWithHelper) {
895  Tensor t1 = test::AsTensor<int32>({0, 1, 2, 3, 4, 5}, {2, 3});
896  Tensor t2(t1.dtype(), t1.shape());
897  t2.flat<int32>() = t1.flat<int32>() * 2;
898  Tensor t3 = test::AsTensor<int32>({0, 2, 4, 6, 8, 10}, t1.shape());
899  test::ExpectTensorEqual<int32>(t2, t3);
900}
901
902TEST(Tensor_UInt16, SimpleWithHelper) {
903  Tensor t1 = test::AsTensor<uint16>({0, 1, 2, 3, 4, 5}, {2, 3});
904  Tensor t2(t1.dtype(), t1.shape());
905  t2.flat<uint16>() = t1.flat<uint16>() * uint16(2);
906  Tensor t3 = test::AsTensor<uint16>({0, 2, 4, 6, 8, 10}, t1.shape());
907  test::ExpectTensorEqual<uint16>(t2, t3);
908}
909
910TEST(Tensor_QInt8, SimpleWithHelper) {
911  Tensor t1 = test::AsTensor<qint8>({0, 1, 2, 3, 4, 5}, {2, 3});
912  Tensor t2(t1.dtype(), t1.shape());
913  t2.flat<qint8>() = t1.flat<qint8>() + qint8(-2);
914  Tensor t3 = test::AsTensor<qint8>({-2, -1, 0, 1, 2, 3}, {2, 3});
915  test::ExpectTensorEqual<qint8>(t2, t3);
916}
917
918TEST(Tensor_QUInt8, SimpleWithHelper) {
919  Tensor t1 = test::AsTensor<quint8>({0, 1, 2, 3, 4, 5}, {2, 3});
920  Tensor t2(t1.dtype(), t1.shape());
921  t2.flat<quint8>() = t1.flat<quint8>() + quint8(2);
922  Tensor t3 = test::AsTensor<quint8>({2, 3, 4, 5, 6, 7}, {2, 3});
923  test::ExpectTensorEqual<quint8>(t2, t3);
924}
925
926TEST(Tensor_Int64, SimpleWithHelper) {
927  Tensor t1 = test::AsTensor<int64>(
928      {0LL << 48, 1LL << 48, 2LL << 48, 3LL << 48, 4LL << 48, 5LL << 48},
929      {2, 3});
930  Tensor t2(t1.dtype(), t1.shape());
931  t2.flat<int64>() = t1.flat<int64>() * static_cast<int64>(2);
932  Tensor t3 = test::AsTensor<int64>(
933      {0LL << 48, 2LL << 48, 4LL << 48, 6LL << 48, 8LL << 48, 10LL << 48},
934      {2, 3});
935  test::ExpectTensorEqual<int64>(t2, t3);
936}
937
938TEST(Tensor_String, SimpleWithHelper) {
939  Tensor t1 = test::AsTensor<string>({"0", "1", "2", "3", "4", "5"}, {2, 3});
940  Tensor t2(DT_STRING, {2, 3});
941  for (int i = 0; i < 2; ++i) {
942    for (int j = 0; j < 3; ++j) {
943      t2.matrix<string>()(i, j) = strings::StrCat(i * 3 + j);
944    }
945  }
946
947  // Test with helper.
948  test::ExpectTensorEqual<string>(t1, t2);
949}
950
951TEST(Tensor_Bool, SimpleWithHelper) {
952  Tensor t1 =
953      test::AsTensor<bool>({false, true, false, true, false, true}, {2, 3});
954
955  Tensor t2(DT_BOOL, {2, 3});
956  for (int i = 0; i < 2; ++i) {
957    for (int j = 0; j < 3; ++j) {
958      t2.matrix<bool>()(i, j) = (((i + j) % 2) != 0);
959    }
960  }
961
962  // Test with helper.
963  test::ExpectTensorEqual<bool>(t1, t2);
964}
965
966TEST(Tensor_Complex, Simple64) {
967  Tensor t(DT_COMPLEX64, {4, 5, 3, 7});
968  t.flat<complex64>().setRandom();
969  TestCopies<complex64>(t);
970}
971
972TEST(Tensor_Complex, Simple128) {
973  Tensor t(DT_COMPLEX128, {4, 5, 3, 7});
974  t.flat<complex128>().setRandom();
975  TestCopies<complex128>(t);
976}
977
TEST(Tensor_Complex, SimpleWithHelper64) {
  {
    // Element-wise multiply by 2i: (a + bi) * 2i == -2b + 2ai.
    Tensor t1 = test::AsTensor<complex64>({0,
                                           {1, 1},
                                           complex64(2),
                                           complex64(3, 3),
                                           complex64(0, 4),
                                           complex64(2, 5)},
                                          {2, 3});
    Tensor t2(t1.dtype(), t1.shape());
    t2.flat<complex64>() = t1.flat<complex64>() * complex64(0, 2);
    Tensor t3 = test::AsTensor<complex64>(
        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
        // shape
        {2, 3});
    test::ExpectTensorEqual<complex64>(t2, t3);
  }

  // Does some numeric operations for complex64 numbers.
  {
    const float PI = std::acos(-1);
    // Unit-magnitude complex number at 45 degrees; multiplying by it
    // rotates a complex number by PI/4.
    const complex64 rotate_45 = std::polar(1.0f, PI / 4);

    // x contains all the 8-th root of unity.
    Tensor x(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      x.vec<complex64>()(i) = MathUtil::IPow(rotate_45, i);
    }

    // Shift the roots by 45 degree.
    Tensor y(DT_COMPLEX64, TensorShape({8}));
    y.vec<complex64>() = x.vec<complex64>() * rotate_45;
    Tensor y_expected(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      y_expected.vec<complex64>()(i) = MathUtil::IPow(rotate_45, i + 1);
    }
    // Near-comparison: the rotation goes through floating-point trig.
    test::ExpectTensorNear<complex64>(y, y_expected, 1e-5);

    // Raise roots to the power of 8.
    Tensor z(DT_COMPLEX64, TensorShape({8}));
    z.vec<complex64>() = x.vec<complex64>().pow(8);
    Tensor z_expected(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      // Any 8-th root of unity raised to the 8th power is 1.
      z_expected.vec<complex64>()(i) = 1;
    }
    test::ExpectTensorNear<complex64>(z, z_expected, 1e-5);
  }
}
1026
TEST(Tensor_Complex, SimpleWithHelper128) {
  {
    // Element-wise multiply by 2i: (a + bi) * 2i == -2b + 2ai.
    Tensor t1 = test::AsTensor<complex128>({0,
                                            {1, 1},
                                            complex128(2),
                                            complex128(3, 3),
                                            complex128(0, 4),
                                            complex128(2, 5)},
                                           {2, 3});
    Tensor t2(t1.dtype(), t1.shape());
    t2.flat<complex128>() = t1.flat<complex128>() * complex128(0, 2);
    Tensor t3 = test::AsTensor<complex128>(
        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
        // shape
        {2, 3});
    test::ExpectTensorEqual<complex128>(t2, t3);
  }

  // Does some numeric operations for complex128 numbers.
  {
    const double PI = std::acos(-1);
    // Unit-magnitude complex number at 45 degrees; multiplying by it
    // rotates a complex number by PI/4.
    const complex128 rotate_45 = std::polar(1.0, PI / 4);

    // x contains all the 8-th root of unity.
    Tensor x(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      x.vec<complex128>()(i) = MathUtil::IPow(rotate_45, i);
    }

    // Shift the roots by 45 degree.
    Tensor y(DT_COMPLEX128, TensorShape({8}));
    y.vec<complex128>() = x.vec<complex128>() * rotate_45;
    Tensor y_expected(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      y_expected.vec<complex128>()(i) = MathUtil::IPow(rotate_45, i + 1);
    }
    // Near-comparison: the rotation goes through floating-point trig.
    test::ExpectTensorNear<complex128>(y, y_expected, 1e-5);

    // Raise roots to the power of 8.
    Tensor z(DT_COMPLEX128, TensorShape({8}));
    z.vec<complex128>() = x.vec<complex128>().pow(8);
    Tensor z_expected(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      // Any 8-th root of unity raised to the 8th power is 1.
      z_expected.vec<complex128>()(i) = 1;
    }
    test::ExpectTensorNear<complex128>(z, z_expected, 1e-5);
  }
}
1075
// An allocator that always returns nullptr, for testing
// failures to allocate.
class DummyCPUAllocator : public Allocator {
 public:
  DummyCPUAllocator() = default;
  string Name() override { return "cpu"; }
  // Always fails, simulating an out-of-memory CPU allocator.
  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    return nullptr;
  }
  // No-op: AllocateRaw never hands out memory, so there is nothing to free.
  void DeallocateRaw(void* ptr) override {}
};
1087
1088TEST(Tensor, SharesBufferWith) {
1089  Tensor a_empty;
1090  Tensor b_empty;
1091  Tensor a(DT_FLOAT, TensorShape({1}));
1092  Tensor b(DT_FLOAT, TensorShape({1}));
1093  Tensor copy(a);
1094  EXPECT_FALSE(a_empty.SharesBufferWith(a_empty));
1095  EXPECT_FALSE(a_empty.SharesBufferWith(b_empty));
1096  EXPECT_FALSE(a_empty.SharesBufferWith(a));
1097  EXPECT_FALSE(a_empty.SharesBufferWith(copy));
1098  EXPECT_TRUE(a.SharesBufferWith(a));
1099  EXPECT_FALSE(a.SharesBufferWith(b));
1100  EXPECT_TRUE(a.SharesBufferWith(copy));
1101}
1102
TEST(Tensor, FailureToAllocate) {
  TensorShape shape({1});
  DummyCPUAllocator allocator;
  // Direct construction with a failing allocator must yield an
  // uninitialized tensor, not a crash.
  {
    Tensor a(&allocator, DT_FLOAT, shape);
    ASSERT_FALSE(a.IsInitialized());
  }

  // Float
  {
    Tensor t(DT_FLOAT, TensorShape({1}));
    t.vec<float>()(0) = 1.0;
    TensorProto proto;
    t.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor a(&allocator, DT_FLOAT, TensorShape({1}));
    ASSERT_FALSE(a.FromProto(&allocator, proto));
  }

  // String
  {
    Tensor t(DT_STRING, TensorShape({1}));
    t.vec<string>()(0) = "foo";
    TensorProto proto;
    t.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor a(&allocator, DT_STRING, TensorShape({1}));
    ASSERT_FALSE(a.FromProto(&allocator, proto));
  }

  // Half
  {
    Tensor t(DT_HALF, TensorShape({1}));
    t.vec<Eigen::half>()(0) = Eigen::half(1.0);
    TensorProto proto;
    t.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor a(&allocator, DT_HALF, TensorShape({1}));
    ASSERT_FALSE(a.FromProto(&allocator, proto));
  }
}
1147
1148// On the alignment.
1149//
1150// As of 2015/8, tensorflow::Tensor allocates its buffer with 32-byte
1151// alignment. Tensor::tensor/flat/vec/matrix methods requires the
1152// buffer satisfies Eigen::Aligned (e.g., 16-bytes aligned usually,
1153// and 32-bytes for AVX). Tensor::Slice requires the caller to ensure
1154// its result is aligned if the caller intends to use those methods.
1155// In this test case, we simply make sure each slice is 32-byte
1156// aligned: sizeof(float) * 4 * 2 = 32.
TEST(Tensor, Slice_Basic) {
  Tensor saved;
  {  // General
    Tensor x(DT_FLOAT, TensorShape({10, 4, 34}));
    // Fills in known values.
    for (int i = 0; i < 10; ++i) {
      x.Slice(i, i + 1).flat<float>().setConstant(i * 1.f);
    }
    // A simple slice along dim0.
    Tensor y = x.Slice(4, 8);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 4, 34})));
    auto tx = x.tensor<float, 3>();
    auto ty = y.tensor<float, 3>();
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        for (int k = 0; k < 34; ++k) {
          EXPECT_EQ(ty(i, j, k), 4.0 + i);
          // The slice aliases x's buffer rather than copying it.
          EXPECT_EQ(&tx(4 + i, j, k), &ty(i, j, k));
        }
      }
    }
    // A simple slice equivalent to identity.
    TestCopies<float>(y);
    y = x.Slice(0, 10);
    test::ExpectTensorEqual<float>(x, y);
    EXPECT_EQ(x.flat<float>().data(), y.flat<float>().data());

    // A slice of a slice.
    // Slice(4, 8) then Slice(2, 3) selects original row 6, whose elements
    // were set to 6.0 above.
    auto z = x.Slice(4, 8).Slice(2, 3);
    auto tz = z.tensor<float, 3>();
    EXPECT_EQ(1, z.dim_size(0));
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 34; ++k) {
        EXPECT_EQ(tz(0, j, k), 6.0);
      }
    }

    // x and y will be out of scope. But 'saved' should be alive.
    saved = z;
  }
  {
    // 'saved' shares z's buffer, which keeps row 6's data valid after the
    // scope above ends.
    EXPECT_EQ(1, saved.dim_size(0));
    auto tsaved = saved.tensor<float, 3>();
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 34; ++k) {
        EXPECT_EQ(tsaved(0, j, k), 6.0);
      }
    }
  }
  {  // Empty
    Tensor x(DT_FLOAT, TensorShape({10, 0, 34}));
    x.flat<float>().setRandom();
    Tensor y = x.Slice(4, 8);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 0, 34})));
  }

  {
    // Test unaligned access via a Slice.
    Tensor x(DT_FLOAT, TensorShape({30}));
    x.flat<float>().setConstant(0.0);

    // Take an unaligned slice.
    Tensor y = x.Slice(1, 13);
#if EIGEN_MAX_ALIGN_BYTES > 0
    EXPECT_FALSE(y.IsAligned());
#endif
    // unaligned_flat() is the accessor that does not require alignment.
    y.unaligned_flat<float>().setConstant(1.0);
    for (int64 i = 0; i < y.NumElements(); ++i) {
      EXPECT_EQ(1.0, y.unaligned_flat<float>()(i));
    }
  }
}
1229
1230template <typename T>
1231Tensor MkTensor(DataType dt, const TensorShape& shape,
1232                std::vector<T> init_values) {
1233  Tensor x(dt, shape);
1234  const int limit = x.NumElements();
1235  int vi = 0;
1236  for (int i = 0; i < limit; ++i) {
1237    x.flat<T>()(i) = init_values[vi++];
1238    if (vi >= init_values.size()) vi = 0;
1239  }
1240  return x;
1241}
1242
1243TEST(SummarizeValue, Uninitialized) {
1244  Tensor x(DT_INT32);
1245  TensorTestHelper::set_shape(&x, TensorShape({4, 4}));
1246  EXPECT_EQ(
1247      strings::StrCat("uninitialized Tensor of 16 elements of type ", DT_INT32),
1248      x.SummarizeValue(16));
1249}
1250
1251TEST(SummarizeValue, INT32) {
1252  Tensor x = MkTensor<int>(DT_INT32, TensorShape({5}), {1, 2, 3, 4, 0});
1253  EXPECT_EQ("1 2 3 4 0", x.SummarizeValue(16));
1254  x = MkTensor<int>(DT_INT32, TensorShape({2, 2}), {1, 2, 3, 4, 0});
1255  EXPECT_EQ("[1 2][3 4]", x.SummarizeValue(16));
1256  x = MkTensor<int>(DT_INT32, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
1257  EXPECT_EQ("[[[1]][[2]]][[[3]][[4]]]", x.SummarizeValue(16));
1258  EXPECT_EQ("[[[1]][[2]]][[[3]]]...", x.SummarizeValue(3));
1259  x = MkTensor<int>(DT_INT32, TensorShape({0}), {});
1260  EXPECT_EQ("", x.SummarizeValue(16));
1261}
1262
1263TEST(SummarizeValue, FLOAT) {
1264  Tensor x = MkTensor<float>(DT_FLOAT, TensorShape({5}), {1, 2, 3, 4, 0});
1265  EXPECT_EQ("1 2 3 4 0", x.SummarizeValue(16));
1266  x = MkTensor<float>(DT_FLOAT, TensorShape({2, 2}), {1, 2, 3, 4, 0});
1267  EXPECT_EQ("[1 2][3 4]", x.SummarizeValue(16));
1268  x = MkTensor<float>(DT_FLOAT, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
1269  EXPECT_EQ("[[[1]][[2]]][[[3]][[4]]]", x.SummarizeValue(16));
1270  EXPECT_EQ("[[[1]][[2]]][[[3]]]...", x.SummarizeValue(3));
1271  x = MkTensor<float>(DT_FLOAT, TensorShape({0}), {});
1272  EXPECT_EQ("", x.SummarizeValue(16));
1273}
1274
1275TEST(SummarizeValue, BOOL) {
1276  Tensor x = MkTensor<bool>(DT_BOOL, TensorShape({5}), {false, true, true});
1277  EXPECT_EQ("0 1 1 0 1", x.SummarizeValue(16));
1278  EXPECT_EQ("0 1 1...", x.SummarizeValue(3));
1279}
1280
1281TEST(SummarizeValue, STRING) {
1282  Tensor x = MkTensor<string>(DT_STRING, TensorShape({5}),
1283                              {"one", "two", "three", "four", "five"});
1284  EXPECT_EQ("one two three four five", x.SummarizeValue(16));
1285  x = MkTensor<string>(DT_STRING, TensorShape({5, 1, 5}),
1286                       {"one", "two", "three", "four", "five"});
1287  EXPECT_EQ("one two three four five one...", x.SummarizeValue(6));
1288}
1289
1290void BM_CreateAndDestroy(int iters) {
1291  TensorShape shape({10, 20});
1292  while (--iters) {
1293    Tensor t(DT_FLOAT, shape);
1294  }
1295}
1296BENCHMARK(BM_CreateAndDestroy);
1297
1298void BM_Assign(int iters) {
1299  Tensor a(DT_FLOAT, TensorShape({10, 20}));
1300  Tensor b(DT_FLOAT, TensorShape({10, 20}));
1301  bool a_to_b = true;
1302  while (--iters) {
1303    if (a_to_b) {
1304      b = a;
1305    } else {
1306      a = b;
1307    }
1308    a_to_b = !a_to_b;
1309  }
1310}
1311BENCHMARK(BM_Assign);
1312
1313// Ensure tensor_data() works on empty tensors
1314TEST(Tensor, EmptyTensorData) {
1315  Tensor empty;
1316  EXPECT_EQ(empty.tensor_data().size(), 0);
1317}
1318
1319// Benchmark create and destroy a tensor, with an allocated buffer.
1320void BM_CreateAndDestroyWithBuf(int iters) {
1321  TensorShape shape({10, 20});
1322  Allocator* allocator = cpu_allocator();
1323  while (--iters) {
1324    Tensor a(allocator, DT_FLOAT, shape);
1325  }
1326}
1327BENCHMARK(BM_CreateAndDestroyWithBuf);
1328
1329// Benchmark create+copy a tensor, with an allocated buffer.
1330void BM_CreateAndCopyCtrWithBuf(int iters) {
1331  TensorShape shape({10, 20});
1332  Allocator* allocator = cpu_allocator();
1333  while (--iters) {
1334    Tensor a(allocator, DT_FLOAT, shape);
1335    Tensor b(a);
1336  }
1337}
1338BENCHMARK(BM_CreateAndCopyCtrWithBuf);
1339
1340// Benchmark create+move a tensor, with an allocated buffer.
1341void BM_CreateAndMoveCtrWithBuf(int iters) {
1342  TensorShape shape({10, 20});
1343  Allocator* allocator = cpu_allocator();
1344  while (--iters) {
1345    Tensor a(allocator, DT_FLOAT, shape);
1346    Tensor b(std::move(a));
1347  }
1348}
1349BENCHMARK(BM_CreateAndMoveCtrWithBuf);
1350
1351}  // namespace
1352}  // namespace tensorflow
1353