# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""Functional tests for SpacetoDepth op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


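# For reference: space_to_depth rearranges blocks of spatial data into depth.
# With the default NHWC format, an input of shape [batch, height, width,
# depth] and block size B produces an output of shape
# [batch, height // B, width // B, depth * B * B]; the Y, X coordinates
# within each input block become the high-order component of the output
# channel index. E.g. the [1, 2, 2, 1] input [[[[1], [2]], [[3], [4]]]] with
# B = 2 becomes the [1, 1, 1, 4] output [[[[1, 2, 3, 4]]]] (see testBasic).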
class SpaceToDepthTest(test.TestCase):

  def _testOne(self, inputs, block_size, outputs):
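    """Evaluates space_to_depth on `inputs` and checks against `outputs`.

    Runs the op in NHWC format on CPU and, when a GPU is available, in both
    NHWC and NCHW formats on GPU, converting layouts with the test_util
    helpers so all results are compared against the same expected output.
    """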
    input_nhwc = math_ops.to_float(inputs)
    with self.test_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.space_to_depth(input_nhwc, block_size)
      self.assertAllEqual(x_tf.eval(), outputs)
    if test.is_gpu_available():
      with self.test_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.space_to_depth(input_nhwc, block_size)
        self.assertAllEqual(x_tf.eval(), outputs)
        # test NCHW on GPU
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.space_to_depth(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(output_nhwc.eval(), outputs)

  def testBasic(self):
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions, to make sure elements are correctly
  # ordered spatially.
  def testLargerInput2x2(self):
    x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
             [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
                                             [13, 14, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions, to make sure elements are correctly
  # ordered in depth. Here, a larger block size is used.
  def testLargerInput4x4(self):
    x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
             [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
    block_size = 4
    x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths, to make sure elements are properly
  # interleaved in depth.
  def testDepthInterleaved(self):
    x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths with an odd depth, to make sure elements
  # are properly interleaved in depth.
  def testDepthInterleavedDepth3(self):
    x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions and larger input depths, to make sure
  # elements are properly interleaved in depth and ordered spatially.
  def testDepthInterleavedLarge(self):
    x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
             [[3, 30], [4, 40], [7, 70], [8, 80]],
             [[9, 90], [10, 100], [13, 130], [14, 140]],
             [[11, 110], [12, 120], [15, 150], [16, 160]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120],
               [13, 130, 14, 140, 15, 150, 16, 160]]]]
    self._testOne(x_np, block_size, x_out)

  def testBlockSize2Batch10(self):
    block_size = 2

    def batch_input_elt(i):
      return [[[1 * i], [2 * i], [5 * i], [6 * i]],
              [[3 * i], [4 * i], [7 * i], [8 * i]],
              [[9 * i], [10 * i], [13 * i], [14 * i]],
              [[11 * i], [12 * i], [15 * i], [16 * i]]]

    def batch_output_elt(i):
      return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
              [[9 * i, 10 * i, 11 * i, 12 * i],
               [13 * i, 14 * i, 15 * i, 16 * i]]]

    batch_size = 10
    x_np = [batch_input_elt(i) for i in range(batch_size)]
    x_out = [batch_output_elt(i) for i in range(batch_size)]
    self._testOne(x_np, block_size, x_out)

  # Tests for different width and height.
  def testNonSquare(self):
    x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
             [[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120]]]]
    self._testOne(x_np, block_size, x_out)

  # Error handling:

  def testInputWrongDimMissingDepth(self):
    # The input is missing the last dimension ("depth")
    x_np = [[[1, 2], [3, 4]]]
    block_size = 2
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testInputWrongDimMissingBatch(self):
    # The input is missing the first dimension ("batch")
    x_np = [[[1], [2]], [[3], [4]]]
    block_size = 2
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSize0(self):
    # The block size is 0.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 0
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeOne(self):
    # The block size is 1. The block size needs to be > 1.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 1
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeLarger(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 10
    with self.assertRaises(ValueError):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeNotDivisibleWidth(self):
    # The block size divides width but not height.
    x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleHeight(self):
    # The block size divides height but not width.
    x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testUnknownShape(self):
    t = array_ops.space_to_depth(
        array_ops.placeholder(dtypes.float32), block_size=4)
    self.assertEqual(4, t.get_shape().ndims)

  def spaceToDepthUsingTranspose(self, tensor, block_size, data_format):
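    """Reference implementation of space_to_depth using reshape + transpose.

    Splits each spatial dimension into (output coordinate, within-block
    coordinate) pairs, then transposes the within-block coordinates ahead of
    the input channels so that, after the final reshape, they form the
    high-order component of the output depth.
    """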
    block_size_sq = block_size * block_size
    if data_format == "NHWC":
      b, ih, iw, ic = tensor.shape.as_list()
      assert ih % block_size == 0, (ih, block_size)
      assert iw % block_size == 0, (iw, block_size)
      ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, oh, block_size, ow, block_size, ic])
      tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
      tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
    elif data_format == "NCHW":
      b, ic, ih, iw = tensor.shape.as_list()
      assert ih % block_size == 0, (ih, block_size)
      assert iw % block_size == 0, (iw, block_size)
      ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, ic, oh, block_size, ow, block_size])
      tensor = array_ops.transpose(tensor, [0, 3, 5, 1, 2, 4])
      tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
    return tensor

  def compareToTranspose(self, batch_size, out_height, out_width, in_channels,
                         block_size, data_format, use_gpu):
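    """Checks the space_to_depth op against the reshape/transpose reference.

    The expected value is always computed with the float reference
    implementation; for NCHW_VECT_C the actual value is computed on a
    qint8-quantized copy of the input and dequantized for comparison.
    """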
    in_height = out_height * block_size
    in_width = out_width * block_size
    nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
    nchw_input_shape = [batch_size, in_channels, in_height, in_width]
    total_size = np.prod(nhwc_input_shape)

    if data_format == "NCHW_VECT_C":
      # Initialize the input tensor with qint8 values that cycle through
      # the range -127..127.
      x = [((f + 128) % 255) - 127 for f in range(total_size)]
      t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
      expected = self.spaceToDepthUsingTranspose(t, block_size, "NHWC")
      t = test_util.NHWCToNCHW_VECT_C(t)
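      # Quantize to qint8 for the NCHW_VECT_C op, then dequantize the result
      # below so it can be compared against the float NHWC reference.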
      t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
      t = array_ops.space_to_depth(t, block_size, data_format="NCHW_VECT_C")
      t = gen_array_ops.dequantize(t, -128, 127)
      actual = test_util.NCHW_VECT_CToNHWC(t)
    else:
      # Initialize the input tensor with ascending whole numbers as floats.
      x = [f * 1.0 for f in range(total_size)]
      shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
      t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
      expected = self.spaceToDepthUsingTranspose(t, block_size, data_format)
      actual = array_ops.space_to_depth(t, block_size, data_format=data_format)

    with self.test_session(use_gpu=use_gpu) as sess:
      actual_vals, expected_vals = sess.run([actual, expected])
      self.assertTrue(np.array_equal(actual_vals, expected_vals))

  def testAgainstTranspose(self):
    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", False)
    self.compareToTranspose(1, 2, 3, 2, 2, "NHWC", False)
    self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", False)

    if not test.is_gpu_available():
      tf_logging.info("skipping gpu tests since gpu not available")
      return

    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", True)
    self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", True)

    self.compareToTranspose(3, 2, 3, 4, 2, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", True)
    self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", True)


class SpaceToDepthGradientTest(test.TestCase):

  # Check the gradients.
  def _checkGrad(self, x, block_size, data_format):
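    """Compares analytic and numeric Jacobians of space_to_depth w.r.t. x.

    Uses gradient_checker.compute_gradient to obtain both Jacobians and
    asserts that they agree to within epsilon.
    """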
    # NCHW is implemented only on GPU.
    if data_format == "NCHW" and not test.is_gpu_available():
      return

    assert 4 == x.ndim
    with self.test_session(use_gpu=True):
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.space_to_depth(tf_x, block_size, data_format=data_format)
      epsilon = 1e-2
      x_jacob_t, x_jacob_n = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)

    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

  # Tests the gradient of space_to_depth for x, a four-dimensional tensor
  # of shape [b, h * block_size, w * block_size, d].
  def _compare(self, b, h, w, d, block_size, data_format):
    block_size_sq = block_size * block_size
    data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
        np.float32)
    if data_format == "NHWC":
      x = data.reshape([b, h * block_size, w * block_size, d])
    else:
      x = data.reshape([b, d, h * block_size, w * block_size])

    self._checkGrad(x, block_size, data_format)

  # Don't use very large numbers as dimensions here, as the gradient checker
  # computes a Jacobian whose size is the product of the input and output
  # element counts.
  def testSmall(self):
    block_size = 2
    self._compare(1, 2, 3, 5, block_size, "NHWC")
    self._compare(1, 2, 3, 5, block_size, "NCHW")

  def testSmall2(self):
    block_size = 2
    self._compare(2, 4, 3, 2, block_size, "NHWC")
    self._compare(2, 4, 3, 2, block_size, "NCHW")


if __name__ == "__main__":
  test.main()