# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Functional tests for DepthToSpace op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


class DepthToSpaceTest(test.TestCase):
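  """Tests the DepthToSpace op forward pass and its error handling."""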

  def _testOne(self, inputs, block_size, outputs):
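    """Evaluates depth_to_space(inputs, block_size) against expected outputs.

    Runs NHWC on CPU and, when a GPU is available, NHWC and NCHW on GPU.
    """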
    input_nhwc = math_ops.to_float(inputs)
    with self.test_session(use_gpu=False):
      # Test NHWC (default) on CPU.
      x_tf = array_ops.depth_to_space(input_nhwc, block_size)
      self.assertAllEqual(x_tf.eval(), outputs)
    if test.is_gpu_available():
      with self.test_session(use_gpu=True):
        # Test NHWC (default) on GPU.
        x_tf = array_ops.depth_to_space(input_nhwc, block_size)
        self.assertAllEqual(x_tf.eval(), outputs)
        # Test NCHW on GPU.
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.depth_to_space(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(output_nhwc.eval(), outputs)

  def testBasic(self):
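    # depth_to_space moves each group of block_size**2 depth values into a
    # block_size x block_size spatial block: [1, 1, 1, 4] -> [1, 2, 2, 1].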
    x_np = [[[[1, 2, 3, 4]]]]
    block_size = 2
    x_out = [[[[1], [2]], [[3], [4]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered spatially.
  def testBlockSize2(self):
    x_np = [[[[1, 2, 3, 4],
              [5, 6, 7, 8]],
             [[9, 10, 11, 12],
              [13, 14, 15, 16]]]]
    block_size = 2
    x_out = [[[[1], [2], [5], [6]],
              [[3], [4], [7], [8]],
              [[9], [10], [13], [14]],
              [[11], [12], [15], [16]]]]
    self._testOne(x_np, block_size, x_out)

  def testBlockSize2Batch10(self):
    block_size = 2
    def batch_input_elt(i):
      return [[[1 * i, 2 * i, 3 * i, 4 * i],
               [5 * i, 6 * i, 7 * i, 8 * i]],
              [[9 * i, 10 * i, 11 * i, 12 * i],
               [13 * i, 14 * i, 15 * i, 16 * i]]]
    def batch_output_elt(i):
      return [[[1 * i], [2 * i], [5 * i], [6 * i]],
              [[3 * i], [4 * i], [7 * i], [8 * i]],
              [[9 * i], [10 * i], [13 * i], [14 * i]],
              [[11 * i], [12 * i], [15 * i], [16 * i]]]
    batch_size = 10
    x_np = [batch_input_elt(i) for i in range(batch_size)]
    x_out = [batch_output_elt(i) for i in range(batch_size)]
    self._testOne(x_np, block_size, x_out)

  # Tests for different width and height.
  def testNonSquare(self):
    x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
             [[5, 50, 6, 60, 7, 70, 8, 80]],
             [[9, 90, 10, 100, 11, 110, 12, 120]]]]
    block_size = 2
    x_out = [[[[1, 10], [2, 20]],
              [[3, 30], [4, 40]],
              [[5, 50], [6, 60]],
              [[7, 70], [8, 80]],
              [[9, 90], [10, 100]],
              [[11, 110], [12, 120]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered spatially.
  def testBlockSize4FlatInput(self):
    x_np = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
    block_size = 4
    x_out = [[[[1], [2], [5], [6]],
              [[3], [4], [7], [8]],
              [[9], [10], [13], [14]],
              [[11], [12], [15], [16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths, to make sure elements are properly
  # interleaved in depth.
  def testDepthInterleaved(self):
    x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
    block_size = 2
    x_out = [[[[1, 10], [2, 20]],
              [[3, 30], [4, 40]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths, here with an odd output depth, to make
  # sure elements are properly interleaved in depth.
  def testDepthInterleavedDepth3(self):
    x_np = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    block_size = 2
    x_out = [[[[1, 2, 3], [4, 5, 6]],
              [[7, 8, 9], [10, 11, 12]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths, to make sure elements are properly
  # interleaved in depth.
  def testDepthInterleavedLarger(self):
    x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40],
              [5, 50, 6, 60, 7, 70, 8, 80]],
             [[9, 90, 10, 100, 11, 110, 12, 120],
              [13, 130, 14, 140, 15, 150, 16, 160]]]]
    block_size = 2
    x_out = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
              [[3, 30], [4, 40], [7, 70], [8, 80]],
              [[9, 90], [10, 100], [13, 130], [14, 140]],
              [[11, 110], [12, 120], [15, 150], [16, 160]]]]
    self._testOne(x_np, block_size, x_out)

  # Error handling:

  # Tests for a block size too large for the input depth; this should raise
  # an exception.
  def testBlockSizeTooLarge(self):
    x_np = [[[[1, 2, 3, 4],
              [5, 6, 7, 8]],
             [[9, 10, 11, 12],
              [13, 14, 15, 16]]]]
    block_size = 4
    # Raises an exception, since the depth is only 4 and needs to be
    # divisible by block_size ** 2 = 16.
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      out_tf.eval()

  # Test when the block size is 0.
  def testBlockSize0(self):
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 0
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      out_tf.eval()

  # Test when the block size is 1. The block size should be > 1.
  def testBlockSizeOne(self):
    x_np = [[[[1, 1, 1, 1],
              [2, 2, 2, 2]],
             [[3, 3, 3, 3],
              [4, 4, 4, 4]]]]
    block_size = 1
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      out_tf.eval()

  def testBlockSizeLargerThanInput(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 10
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      out_tf.eval()

  def testBlockSizeNotDivisibleDepth(self):
    # The depth is not divisible by the square of the block size.
    x_np = [[[[1, 1, 1, 1],
              [2, 2, 2, 2]],
             [[3, 3, 3, 3],
              [4, 4, 4, 4]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = array_ops.depth_to_space(x_np, block_size)

  def testUnknownShape(self):
    t = array_ops.depth_to_space(
        array_ops.placeholder(dtypes.float32), block_size=4)
    self.assertEqual(4, t.get_shape().ndims)

  def depthToSpaceUsingTranspose(self, tensor, block_size, data_format):
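    """Reference implementation of depth_to_space using reshape/transpose."""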
    block_size_sq = block_size * block_size
    if data_format == "NHWC":
      b, ih, iw, ic = tensor.shape.as_list()
      assert ic % block_size_sq == 0, (ic, block_size_sq)
      ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
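      # Split depth into (block_size, block_size, oc) and interleave the
      # block rows with height and the block columns with width.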
      tensor = array_ops.reshape(tensor,
                                 [b, ih, iw, block_size, block_size, oc])
      tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
      tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
    elif data_format == "NCHW":
      b, ic, ih, iw = tensor.shape.as_list()
      assert ic % block_size_sq == 0, (ic, block_size_sq)
      ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
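      # Split depth into (block_size, block_size, oc), move oc ahead of the
      # spatial dims, then interleave block rows/columns with height/width.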
      tensor = array_ops.reshape(tensor,
                                 [b, block_size, block_size, oc, ih, iw])
      tensor = array_ops.transpose(tensor, [0, 3, 4, 1, 5, 2])
      tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
    return tensor

  def compareToTranspose(self, batch_size, in_height, in_width, out_channels,
                         block_size, data_format, use_gpu):
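    """Checks depth_to_space against the reshape/transpose reference."""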
    in_channels = out_channels * block_size * block_size
    nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
    nchw_input_shape = [batch_size, in_channels, in_height, in_width]
    total_size = np.prod(nhwc_input_shape)

    if data_format == "NCHW_VECT_C":
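      # NCHW_VECT_C packs channels four at a time into an innermost vector
      # dimension (shape [N, C / 4, H, W, 4]) and carries qint8 data, so the
      # input is quantized before the op and dequantized afterwards.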
      # Initialize the input tensor with qint8 values that cycle through the
      # range -127..127.
      x = [((f + 128) % 255) - 127 for f in range(total_size)]
      t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
      expected = self.depthToSpaceUsingTranspose(t, block_size, "NHWC")
      t = test_util.NHWCToNCHW_VECT_C(t)
      t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
      t = array_ops.depth_to_space(t, block_size, data_format="NCHW_VECT_C")
      t = gen_array_ops.dequantize(t, -128, 127)
      actual = test_util.NCHW_VECT_CToNHWC(t)
    else:
      # Initialize the input tensor with ascending whole numbers as floats.
      x = [f * 1.0 for f in range(total_size)]
      shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
      t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
      expected = self.depthToSpaceUsingTranspose(t, block_size, data_format)
      actual = array_ops.depth_to_space(t, block_size, data_format=data_format)

    with self.test_session(use_gpu=use_gpu) as sess:
      actual_vals, expected_vals = sess.run([actual, expected])
      self.assertTrue(np.array_equal(actual_vals, expected_vals))

  def testAgainstTranspose(self):
    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", False)
    self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", False)
    self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", False)

    if not test.is_gpu_available():
      tf_logging.info("skipping gpu tests since gpu not available")
      return

    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 2, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 1, 3, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", True)
    self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", True)
    self.compareToTranspose(3, 200, 300, 32, 2, "NCHW", True)

    self.compareToTranspose(3, 2, 3, 8, 2, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 2, 3, 4, 3, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", True)
    self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 200, 300, 32, 2, "NCHW_VECT_C", True)


class DepthToSpaceGradientTest(test.TestCase):
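  """Tests the gradients of the DepthToSpace op."""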

  # Check the gradients.
  def _checkGrad(self, x, block_size, data_format):
    # NCHW is only implemented on GPU.
    if data_format == "NCHW" and not test.is_gpu_available():
      return

    assert 4 == x.ndim
    with self.test_session(use_gpu=True):
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)

      epsilon = 1e-2
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

  # Tests the gradient of depth_to_space for x, a four-dimensional tensor of
  # shape [b, h, w, d * block_size * block_size] (or its NCHW equivalent).
  def _compare(self, b, h, w, d, block_size, data_format):
    block_size_sq = block_size * block_size
    data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
        np.float32)
    if data_format == "NHWC":
      x = data.reshape([b, h, w, d * block_size_sq])
    else:
      x = data.reshape([b, d * block_size_sq, h, w])

    self._checkGrad(x, block_size, data_format)

  # Don't use very large numbers as dimensions here, as the Jacobian computed
  # by the gradient checker grows with the product of all the dimensions.
  def testSmall(self):
    block_size = 2
    self._compare(3, 2, 5, 3, block_size, "NHWC")
    self._compare(3, 2, 5, 3, block_size, "NCHW")

  def testSmall2(self):
    block_size = 3
    self._compare(1, 2, 3, 2, block_size, "NHWC")
    self._compare(1, 2, 3, 2, block_size, "NCHW")


if __name__ == "__main__":
  test.main()