1# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Tests for tensorflow.ops.image_ops."""
16
17from __future__ import absolute_import
18from __future__ import division
19from __future__ import print_function
20
21import colorsys
22import functools
23import math
24import os
25import time
26
27import numpy as np
28from six.moves import xrange  # pylint: disable=redefined-builtin
29
30from tensorflow.core.protobuf import config_pb2
31from tensorflow.python.client import session
32from tensorflow.python.framework import constant_op
33from tensorflow.python.framework import dtypes
34from tensorflow.python.framework import errors
35from tensorflow.python.framework import ops
36from tensorflow.python.framework import test_util
37from tensorflow.python.ops import array_ops
38from tensorflow.python.ops import control_flow_ops
39from tensorflow.python.ops import gen_image_ops
40from tensorflow.python.ops import image_ops
41from tensorflow.python.ops import io_ops
42from tensorflow.python.ops import math_ops
43from tensorflow.python.ops import random_ops
44from tensorflow.python.ops import variables
45from tensorflow.python.platform import googletest
46from tensorflow.python.platform import test
47
48
49class RGBToHSVTest(test_util.TensorFlowTestCase):
50
51  def testBatch(self):
52    # Build an arbitrary RGB image
53    np.random.seed(7)
54    batch_size = 5
55    shape = (batch_size, 2, 7, 3)
56
57    for nptype in [np.float32, np.float64]:
58      inp = np.random.rand(*shape).astype(nptype)
59
60      # Convert to HSV and back, as a batch and individually
61      with self.test_session(use_gpu=True) as sess:
62        batch0 = constant_op.constant(inp)
63        batch1 = image_ops.rgb_to_hsv(batch0)
64        batch2 = image_ops.hsv_to_rgb(batch1)
65        split0 = array_ops.unstack(batch0)
66        split1 = list(map(image_ops.rgb_to_hsv, split0))
67        split2 = list(map(image_ops.hsv_to_rgb, split1))
68        join1 = array_ops.stack(split1)
69        join2 = array_ops.stack(split2)
70        batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
71
72      # Verify that processing batch elements together is the same as separate
73      self.assertAllClose(batch1, join1)
74      self.assertAllClose(batch2, join2)
75      self.assertAllClose(batch2, inp)
76
77  def testRGBToHSVRoundTrip(self):
78    data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
79    for nptype in [np.float32, np.float64]:
80      rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
81      with self.test_session(use_gpu=True):
82        hsv = image_ops.rgb_to_hsv(rgb_np)
83        rgb = image_ops.hsv_to_rgb(hsv)
84        rgb_tf = rgb.eval()
85      self.assertAllClose(rgb_tf, rgb_np)
86
87
88class RGBToYIQTest(test_util.TensorFlowTestCase):
89
90  def testBatch(self):
91    # Build an arbitrary RGB image
92    np.random.seed(7)
93    batch_size = 5
94    shape = (batch_size, 2, 7, 3)
95
96    for nptype in [np.float32, np.float64]:
97      inp = np.random.rand(*shape).astype(nptype)
98
99      # Convert to YIQ and back, as a batch and individually
100      with self.test_session(use_gpu=True) as sess:
101        batch0 = constant_op.constant(inp)
102        batch1 = image_ops.rgb_to_yiq(batch0)
103        batch2 = image_ops.yiq_to_rgb(batch1)
104        split0 = array_ops.unstack(batch0)
105        split1 = list(map(image_ops.rgb_to_yiq, split0))
106        split2 = list(map(image_ops.yiq_to_rgb, split1))
107        join1 = array_ops.stack(split1)
108        join2 = array_ops.stack(split2)
109        batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
110
111      # Verify that processing batch elements together is the same as separate
112      self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
113      self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
114      self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
115
116
117class RGBToYUVTest(test_util.TensorFlowTestCase):
118
119  def testBatch(self):
120    # Build an arbitrary RGB image
121    np.random.seed(7)
122    batch_size = 5
123    shape = (batch_size, 2, 7, 3)
124
125    for nptype in [np.float32, np.float64]:
126      inp = np.random.rand(*shape).astype(nptype)
127
128      # Convert to YUV and back, as a batch and individually
129      with self.test_session(use_gpu=True) as sess:
130        batch0 = constant_op.constant(inp)
131        batch1 = image_ops.rgb_to_yuv(batch0)
132        batch2 = image_ops.yuv_to_rgb(batch1)
133        split0 = array_ops.unstack(batch0)
134        split1 = list(map(image_ops.rgb_to_yuv, split0))
135        split2 = list(map(image_ops.yuv_to_rgb, split1))
136        join1 = array_ops.stack(split1)
137        join2 = array_ops.stack(split2)
138        batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
139
140      # Verify that processing batch elements together is the same as separate
141      self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
142      self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
143      self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
144
145
146class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
147
148  def _RGBToGrayscale(self, images):
149    is_batch = True
150    if len(images.shape) == 3:
151      is_batch = False
152      images = np.expand_dims(images, axis=0)
153    out_shape = images.shape[0:3] + (1,)
154    out = np.zeros(shape=out_shape, dtype=np.uint8)
155    for batch in xrange(images.shape[0]):
156      for y in xrange(images.shape[1]):
157        for x in xrange(images.shape[2]):
158          red = images[batch, y, x, 0]
159          green = images[batch, y, x, 1]
160          blue = images[batch, y, x, 2]
161          gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
162          out[batch, y, x, 0] = int(gray)
163    if not is_batch:
164      out = np.squeeze(out, axis=0)
165    return out
166
167  def _TestRGBToGrayscale(self, x_np):
168    y_np = self._RGBToGrayscale(x_np)
169
170    with self.test_session(use_gpu=True):
171      x_tf = constant_op.constant(x_np, shape=x_np.shape)
172      y = image_ops.rgb_to_grayscale(x_tf)
173      y_tf = y.eval()
174      self.assertAllEqual(y_tf, y_np)
175
176  def testBasicRGBToGrayscale(self):
177    # 4-D input with batch dimension.
178    x_np = np.array(
179        [[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
180    self._TestRGBToGrayscale(x_np)
181
182    # 3-D input with no batch dimension.
183    x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
184    self._TestRGBToGrayscale(x_np)
185
186  def testBasicGrayscaleToRGB(self):
187    # 4-D input with batch dimension.
188    x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
189    y_np = np.array(
190        [[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
191
192    with self.test_session(use_gpu=True):
193      x_tf = constant_op.constant(x_np, shape=x_np.shape)
194      y = image_ops.grayscale_to_rgb(x_tf)
195      y_tf = y.eval()
196      self.assertAllEqual(y_tf, y_np)
197
198    # 3-D input with no batch dimension.
199    x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
200    y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
201
202    with self.test_session(use_gpu=True):
203      x_tf = constant_op.constant(x_np, shape=x_np.shape)
204      y = image_ops.grayscale_to_rgb(x_tf)
205      y_tf = y.eval()
206      self.assertAllEqual(y_tf, y_np)
207
208  def testShapeInference(self):
209    # Shape inference works and produces expected output where possible
210    rgb_shape = [7, None, 19, 3]
211    gray_shape = rgb_shape[:-1] + [1]
212    with self.test_session(use_gpu=True):
213      rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
214      gray = image_ops.rgb_to_grayscale(rgb_tf)
215      self.assertEqual(gray_shape, gray.get_shape().as_list())
216
217    with self.test_session(use_gpu=True):
218      gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
219      rgb = image_ops.grayscale_to_rgb(gray_tf)
220      self.assertEqual(rgb_shape, rgb.get_shape().as_list())
221
222    # Shape inference does not break for unknown shapes
223    with self.test_session(use_gpu=True):
224      rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
225      gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
226      self.assertFalse(gray_unknown.get_shape())
227
228    with self.test_session(use_gpu=True):
229      gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
230      rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
231      self.assertFalse(rgb_unknown.get_shape())
232
233
234class AdjustGamma(test_util.TensorFlowTestCase):
235
236  def test_adjust_gamma_one(self):
237    """Same image should be returned for gamma equal to one"""
238    with self.test_session():
239      x_data = np.random.uniform(0, 255, (8, 8))
240      x_np = np.array(x_data, dtype=np.float32)
241
242      x = constant_op.constant(x_np, shape=x_np.shape)
243      y = image_ops.adjust_gamma(x, gamma=1)
244
245      y_tf = y.eval()
246      y_np = x_np
247
248      self.assertAllClose(y_tf, y_np, 1e-6)
249
250  def test_adjust_gamma_less_zero(self):
251    """White image should be returned for gamma equal to zero"""
252    with self.test_session():
253      x_data = np.random.uniform(0, 255, (8, 8))
254      x_np = np.array(x_data, dtype=np.float32)
255
256      x = constant_op.constant(x_np, shape=x_np.shape)
257
258      err_msg = "Gamma should be a non-negative real number."
259
260      try:
261        image_ops.adjust_gamma(x, gamma=-1)
262      except Exception as e:
263        if err_msg not in str(e):
264          raise
265      else:
266        raise AssertionError("Exception not raised: %s" % err_msg)
267
268  def test_adjust_gamma_less_zero_tensor(self):
269    """White image should be returned for gamma equal to zero"""
270    with self.test_session():
271      x_data = np.random.uniform(0, 255, (8, 8))
272      x_np = np.array(x_data, dtype=np.float32)
273
274      x = constant_op.constant(x_np, shape=x_np.shape)
275      y = constant_op.constant(-1.0, dtype=dtypes.float32)
276
277      image = image_ops.adjust_gamma(x, gamma=y)
278
279      err_msg = "Gamma should be a non-negative real number."
280      try:
281        image.eval()
282      except Exception as e:
283        if err_msg not in str(e):
284          raise
285      else:
286        raise AssertionError("Exception not raised: %s" % err_msg)
287
288  def test_adjust_gamma_zero(self):
289    """White image should be returned for gamma equal to zero"""
290    with self.test_session():
291      x_data = np.random.uniform(0, 255, (8, 8))
292      x_np = np.array(x_data, dtype=np.float32)
293
294      x = constant_op.constant(x_np, shape=x_np.shape)
295      y = image_ops.adjust_gamma(x, gamma=0)
296
297      y_tf = y.eval()
298
299      dtype = x.dtype.as_numpy_dtype
300      y_np = np.array([dtypes.dtype_range[dtype][1]] * x_np.size)
301      y_np = y_np.reshape((8, 8))
302
303      self.assertAllClose(y_tf, y_np, 1e-6)
304
305  def test_adjust_gamma_less_one(self):
306    """Verifying the output with expected results for gamma
307    correction with gamma equal to half"""
308    with self.test_session():
309      x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
310      y = image_ops.adjust_gamma(x_np, gamma=0.5)
311      y_tf = np.trunc(y.eval())
312
313      y_np = np.array(
314          [[0, 31, 45, 55, 63, 71, 78, 84], [
315              90, 95, 100, 105, 110, 115, 119, 123
316          ], [127, 131, 135, 139, 142, 146, 149, 153], [
317              156, 159, 162, 165, 168, 171, 174, 177
318          ], [180, 183, 186, 188, 191, 194, 196, 199], [
319              201, 204, 206, 209, 211, 214, 216, 218
320          ], [221, 223, 225, 228, 230, 232, 234, 236],
321           [238, 241, 243, 245, 247, 249, 251, 253]],
322          dtype=np.float32)
323
324      self.assertAllClose(y_tf, y_np, 1e-6)
325
326  def test_adjust_gamma_greater_one(self):
327    """Verifying the output with expected results for gamma
328    correction with gamma equal to two"""
329    with self.test_session():
330      x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
331      y = image_ops.adjust_gamma(x_np, gamma=2)
332      y_tf = np.trunc(y.eval())
333
334      y_np = np.array(
335          [[0, 0, 0, 0, 1, 1, 2, 3], [4, 5, 6, 7, 9, 10, 12, 14], [
336              16, 18, 20, 22, 25, 27, 30, 33
337          ], [36, 39, 42, 45, 49, 52, 56, 60], [64, 68, 72, 76, 81, 85, 90, 95],
338           [100, 105, 110, 116, 121, 127, 132, 138], [
339               144, 150, 156, 163, 169, 176, 182, 189
340           ], [196, 203, 211, 218, 225, 233, 241, 249]],
341          dtype=np.float32)
342
343      self.assertAllClose(y_tf, y_np, 1e-6)
344
345
346class AdjustHueTest(test_util.TensorFlowTestCase):
347
348  def testAdjustNegativeHue(self):
349    x_shape = [2, 2, 3]
350    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
351    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
352
353    delta = -0.25
354    y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
355    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
356
357    with self.test_session(use_gpu=True):
358      x = constant_op.constant(x_np, shape=x_shape)
359      y = image_ops.adjust_hue(x, delta)
360      y_tf = y.eval()
361      self.assertAllEqual(y_tf, y_np)
362
363  def testAdjustPositiveHue(self):
364    x_shape = [2, 2, 3]
365    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
366    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
367
368    delta = 0.25
369    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
370    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
371
372    with self.test_session(use_gpu=True):
373      x = constant_op.constant(x_np, shape=x_shape)
374      y = image_ops.adjust_hue(x, delta)
375      y_tf = y.eval()
376      self.assertAllEqual(y_tf, y_np)
377
378  def testBatchAdjustHue(self):
379    x_shape = [2, 1, 2, 3]
380    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
381    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
382
383    delta = 0.25
384    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
385    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
386
387    with self.test_session(use_gpu=True):
388      x = constant_op.constant(x_np, shape=x_shape)
389      y = image_ops.adjust_hue(x, delta)
390      y_tf = y.eval()
391      self.assertAllEqual(y_tf, y_np)
392
393  def _adjustHueNp(self, x_np, delta_h):
394    self.assertEqual(x_np.shape[-1], 3)
395    x_v = x_np.reshape([-1, 3])
396    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
397    channel_count = x_v.shape[0]
398    for i in xrange(channel_count):
399      r = x_v[i][0]
400      g = x_v[i][1]
401      b = x_v[i][2]
402      h, s, v = colorsys.rgb_to_hsv(r, g, b)
403      h += delta_h
404      h = math.fmod(h + 10.0, 1.0)
405      r, g, b = colorsys.hsv_to_rgb(h, s, v)
406      y_v[i][0] = r
407      y_v[i][1] = g
408      y_v[i][2] = b
409    return y_v.reshape(x_np.shape)
410
411  def _adjustHueTf(self, x_np, delta_h):
412    with self.test_session(use_gpu=True):
413      x = constant_op.constant(x_np)
414      y = image_ops.adjust_hue(x, delta_h)
415      y_tf = y.eval()
416    return y_tf
417
418  def testAdjustRandomHue(self):
419    x_shapes = [
420        [2, 2, 3],
421        [4, 2, 3],
422        [2, 4, 3],
423        [2, 5, 3],
424        [1000, 1, 3],
425    ]
426    test_styles = [
427        "all_random",
428        "rg_same",
429        "rb_same",
430        "gb_same",
431        "rgb_same",
432    ]
433    for x_shape in x_shapes:
434      for test_style in test_styles:
435        x_np = np.random.rand(*x_shape) * 255.
436        delta_h = np.random.rand() * 2.0 - 1.0
437        if test_style == "all_random":
438          pass
439        elif test_style == "rg_same":
440          x_np[..., 1] = x_np[..., 0]
441        elif test_style == "rb_same":
442          x_np[..., 2] = x_np[..., 0]
443        elif test_style == "gb_same":
444          x_np[..., 2] = x_np[..., 1]
445        elif test_style == "rgb_same":
446          x_np[..., 1] = x_np[..., 0]
447          x_np[..., 2] = x_np[..., 0]
448        else:
449          raise AssertionError("Invalid test style: %s" % (test_style))
450        y_np = self._adjustHueNp(x_np, delta_h)
451        y_tf = self._adjustHueTf(x_np, delta_h)
452        self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
453
454  def testInvalidShapes(self):
455    fused = False
456    if not fused:
457      # The tests are known to pass with the fused adjust_hue. We will enable
458      # them when the fused implementation is the default.
459      return
460    x_np = np.random.rand(2, 3) * 255.
461    delta_h = np.random.rand() * 2.0 - 1.0
462    fused = False
463    with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
464      self._adjustHueTf(x_np, delta_h)
465    x_np = np.random.rand(4, 2, 4) * 255.
466    delta_h = np.random.rand() * 2.0 - 1.0
467    with self.assertRaisesOpError("input must have 3 channels"):
468      self._adjustHueTf(x_np, delta_h)
469
470
471class FlipImageBenchmark(test.Benchmark):
472
473  def _benchmarkFlipLeftRight(self, device, cpu_count):
474    image_shape = [299, 299, 3]
475    warmup_rounds = 100
476    benchmark_rounds = 1000
477    config = config_pb2.ConfigProto()
478    if cpu_count is not None:
479      config.inter_op_parallelism_threads = 1
480      config.intra_op_parallelism_threads = cpu_count
481    with session.Session("", graph=ops.Graph(), config=config) as sess:
482      with ops.device(device):
483        inputs = variables.Variable(
484            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
485            trainable=False,
486            dtype=dtypes.float32)
487        run_op = image_ops.flip_left_right(inputs)
488        sess.run(variables.global_variables_initializer())
489        for i in xrange(warmup_rounds + benchmark_rounds):
490          if i == warmup_rounds:
491            start = time.time()
492          sess.run(run_op)
493    end = time.time()
494    step_time = (end - start) / benchmark_rounds
495    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
496    print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
497          (tag, step_time * 1e6))
498    self.report_benchmark(
499        name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
500        iters=benchmark_rounds,
501        wall_time=step_time)
502
503  def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
504    image_shape = [299, 299, 3]
505    warmup_rounds = 100
506    benchmark_rounds = 1000
507    config = config_pb2.ConfigProto()
508    if cpu_count is not None:
509      config.inter_op_parallelism_threads = 1
510      config.intra_op_parallelism_threads = cpu_count
511    with session.Session("", graph=ops.Graph(), config=config) as sess:
512      with ops.device(device):
513        inputs = variables.Variable(
514            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
515            trainable=False,
516            dtype=dtypes.float32)
517        run_op = image_ops.random_flip_left_right(inputs)
518        sess.run(variables.global_variables_initializer())
519        for i in xrange(warmup_rounds + benchmark_rounds):
520          if i == warmup_rounds:
521            start = time.time()
522          sess.run(run_op)
523    end = time.time()
524    step_time = (end - start) / benchmark_rounds
525    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
526    print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
527          (tag, step_time * 1e6))
528    self.report_benchmark(
529        name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
530        iters=benchmark_rounds,
531        wall_time=step_time)
532
533  def benchmarkFlipLeftRightCpu1(self):
534    self._benchmarkFlipLeftRight("/cpu:0", 1)
535
536  def benchmarkFlipLeftRightCpuAll(self):
537    self._benchmarkFlipLeftRight("/cpu:0", None)
538
539  def benchmarkFlipLeftRightGpu(self):
540    self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
541
542  def benchmarkRandomFlipLeftRightCpu1(self):
543    self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
544
545  def benchmarkRandomFlipLeftRightCpuAll(self):
546    self._benchmarkRandomFlipLeftRight("/cpu:0", None)
547
548  def benchmarkRandomFlipLeftRightGpu(self):
549    self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
550
551
552class AdjustHueBenchmark(test.Benchmark):
553
554  def _benchmarkAdjustHue(self, device, cpu_count):
555    image_shape = [299, 299, 3]
556    warmup_rounds = 100
557    benchmark_rounds = 1000
558    config = config_pb2.ConfigProto()
559    if cpu_count is not None:
560      config.inter_op_parallelism_threads = 1
561      config.intra_op_parallelism_threads = cpu_count
562    with session.Session("", graph=ops.Graph(), config=config) as sess:
563      with ops.device(device):
564        inputs = variables.Variable(
565            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
566            trainable=False,
567            dtype=dtypes.float32)
568        delta = constant_op.constant(0.1, dtype=dtypes.float32)
569        outputs = image_ops.adjust_hue(inputs, delta)
570        run_op = control_flow_ops.group(outputs)
571        sess.run(variables.global_variables_initializer())
572        for i in xrange(warmup_rounds + benchmark_rounds):
573          if i == warmup_rounds:
574            start = time.time()
575          sess.run(run_op)
576    end = time.time()
577    step_time = (end - start) / benchmark_rounds
578    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
579    print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
580          (tag, step_time * 1e6))
581    self.report_benchmark(
582        name="benchmarkAdjustHue_299_299_3_%s" % (tag),
583        iters=benchmark_rounds,
584        wall_time=step_time)
585
586  def benchmarkAdjustHueCpu1(self):
587    self._benchmarkAdjustHue("/cpu:0", 1)
588
589  def benchmarkAdjustHueCpuAll(self):
590    self._benchmarkAdjustHue("/cpu:0", None)
591
592  def benchmarkAdjustHueGpu(self):
593    self._benchmarkAdjustHue(test.gpu_device_name(), None)
594
595
596class AdjustSaturationBenchmark(test.Benchmark):
597
598  def _benchmarkAdjustSaturation(self, device, cpu_count):
599    image_shape = [299, 299, 3]
600    warmup_rounds = 100
601    benchmark_rounds = 1000
602    config = config_pb2.ConfigProto()
603    if cpu_count is not None:
604      config.inter_op_parallelism_threads = 1
605      config.intra_op_parallelism_threads = cpu_count
606    with session.Session("", graph=ops.Graph(), config=config) as sess:
607      with ops.device(device):
608        inputs = variables.Variable(
609            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
610            trainable=False,
611            dtype=dtypes.float32)
612        delta = constant_op.constant(0.1, dtype=dtypes.float32)
613        outputs = image_ops.adjust_saturation(inputs, delta)
614        run_op = control_flow_ops.group(outputs)
615        sess.run(variables.global_variables_initializer())
616        for _ in xrange(warmup_rounds):
617          sess.run(run_op)
618        start = time.time()
619        for _ in xrange(benchmark_rounds):
620          sess.run(run_op)
621    end = time.time()
622    step_time = (end - start) / benchmark_rounds
623    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
624    print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
625          (tag, step_time * 1e6))
626    self.report_benchmark(
627        name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
628        iters=benchmark_rounds,
629        wall_time=step_time)
630
631  def benchmarkAdjustSaturationCpu1(self):
632    self._benchmarkAdjustSaturation("/cpu:0", 1)
633
634  def benchmarkAdjustSaturationCpuAll(self):
635    self._benchmarkAdjustSaturation("/cpu:0", None)
636
637  def benchmarkAdjustSaturationGpu(self):
638    self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
639
640
641class ResizeBilinearBenchmark(test.Benchmark):
642
643  def _benchmarkResize(self, image_size, num_channels):
644    batch_size = 1
645    num_ops = 1000
646    img = variables.Variable(
647        random_ops.random_normal(
648            [batch_size, image_size[0], image_size[1], num_channels]),
649        name="img")
650
651    deps = []
652    for _ in xrange(num_ops):
653      with ops.control_dependencies(deps):
654        resize_op = image_ops.resize_bilinear(
655            img, [299, 299], align_corners=False)
656        deps = [resize_op]
657      benchmark_op = control_flow_ops.group(*deps)
658
659    with session.Session() as sess:
660      sess.run(variables.global_variables_initializer())
661      results = self.run_op_benchmark(
662          sess,
663          benchmark_op,
664          name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
665                                              num_channels)))
666      print("%s   : %.2f ms/img" %
667            (results["name"],
668             1000 * results["wall_time"] / (batch_size * num_ops)))
669
670  def benchmarkSimilar3Channel(self):
671    self._benchmarkResize((183, 229), 3)
672
673  def benchmarkScaleUp3Channel(self):
674    self._benchmarkResize((141, 186), 3)
675
676  def benchmarkScaleDown3Channel(self):
677    self._benchmarkResize((749, 603), 3)
678
679  def benchmarkSimilar1Channel(self):
680    self._benchmarkResize((183, 229), 1)
681
682  def benchmarkScaleUp1Channel(self):
683    self._benchmarkResize((141, 186), 1)
684
685  def benchmarkScaleDown1Channel(self):
686    self._benchmarkResize((749, 603), 1)
687
688
689class ResizeBicubicBenchmark(test.Benchmark):
690
691  def _benchmarkResize(self, image_size, num_channels):
692    batch_size = 1
693    num_ops = 1000
694    img = variables.Variable(
695        random_ops.random_normal(
696            [batch_size, image_size[0], image_size[1], num_channels]),
697        name="img")
698
699    deps = []
700    for _ in xrange(num_ops):
701      with ops.control_dependencies(deps):
702        resize_op = image_ops.resize_bicubic(
703            img, [299, 299], align_corners=False)
704        deps = [resize_op]
705      benchmark_op = control_flow_ops.group(*deps)
706
707    with session.Session() as sess:
708      sess.run(variables.global_variables_initializer())
709      results = self.run_op_benchmark(
710          sess,
711          benchmark_op,
712          min_iters=20,
713          name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
714                                             num_channels)))
715      print("%s   : %.2f ms/img" %
716            (results["name"],
717             1000 * results["wall_time"] / (batch_size * num_ops)))
718
719  def benchmarkSimilar3Channel(self):
720    self._benchmarkResize((183, 229), 3)
721
722  def benchmarkScaleUp3Channel(self):
723    self._benchmarkResize((141, 186), 3)
724
725  def benchmarkScaleDown3Channel(self):
726    self._benchmarkResize((749, 603), 3)
727
728  def benchmarkSimilar1Channel(self):
729    self._benchmarkResize((183, 229), 1)
730
731  def benchmarkScaleUp1Channel(self):
732    self._benchmarkResize((141, 186), 1)
733
734  def benchmarkScaleDown1Channel(self):
735    self._benchmarkResize((749, 603), 1)
736
737  def benchmarkSimilar4Channel(self):
738    self._benchmarkResize((183, 229), 4)
739
740  def benchmarkScaleUp4Channel(self):
741    self._benchmarkResize((141, 186), 4)
742
743  def benchmarkScaleDown4Channel(self):
744    self._benchmarkResize((749, 603), 4)
745
746
747class ResizeAreaBenchmark(test.Benchmark):
748
749  def _benchmarkResize(self, image_size, num_channels):
750    batch_size = 1
751    num_ops = 1000
752    img = variables.Variable(
753        random_ops.random_normal(
754            [batch_size, image_size[0], image_size[1], num_channels]),
755        name="img")
756
757    deps = []
758    for _ in xrange(num_ops):
759      with ops.control_dependencies(deps):
760        resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
761        deps = [resize_op]
762      benchmark_op = control_flow_ops.group(*deps)
763
764    with session.Session() as sess:
765      sess.run(variables.global_variables_initializer())
766      results = self.run_op_benchmark(
767          sess,
768          benchmark_op,
769          name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
770                                          num_channels)))
771      print("%s   : %.2f ms/img" %
772            (results["name"],
773             1000 * results["wall_time"] / (batch_size * num_ops)))
774
775  def benchmarkSimilar3Channel(self):
776    self._benchmarkResize((183, 229), 3)
777
778  def benchmarkScaleUp3Channel(self):
779    self._benchmarkResize((141, 186), 3)
780
781  def benchmarkScaleDown3Channel(self):
782    self._benchmarkResize((749, 603), 3)
783
784  def benchmarkSimilar1Channel(self):
785    self._benchmarkResize((183, 229), 1)
786
787  def benchmarkScaleUp1Channel(self):
788    self._benchmarkResize((141, 186), 1)
789
790  def benchmarkScaleDown1Channel(self):
791    self._benchmarkResize((749, 603), 1)
792
793
794class AdjustSaturationTest(test_util.TensorFlowTestCase):
795
796  def testHalfSaturation(self):
797    x_shape = [2, 2, 3]
798    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
799    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
800
801    saturation_factor = 0.5
802    y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
803    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
804
805    with self.test_session(use_gpu=True):
806      x = constant_op.constant(x_np, shape=x_shape)
807      y = image_ops.adjust_saturation(x, saturation_factor)
808      y_tf = y.eval()
809      self.assertAllEqual(y_tf, y_np)
810
811  def testTwiceSaturation(self):
812    x_shape = [2, 2, 3]
813    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
814    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
815
816    saturation_factor = 2.0
817    y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
818    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
819
820    with self.test_session(use_gpu=True):
821      x = constant_op.constant(x_np, shape=x_shape)
822      y = image_ops.adjust_saturation(x, saturation_factor)
823      y_tf = y.eval()
824      self.assertAllEqual(y_tf, y_np)
825
826  def testBatchSaturation(self):
827    x_shape = [2, 1, 2, 3]
828    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
829    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
830
831    saturation_factor = 0.5
832    y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
833    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
834
835    with self.test_session(use_gpu=True):
836      x = constant_op.constant(x_np, shape=x_shape)
837      y = image_ops.adjust_saturation(x, saturation_factor)
838      y_tf = y.eval()
839      self.assertAllEqual(y_tf, y_np)
840
841  def _adjust_saturation(self, image, saturation_factor):
842    image = ops.convert_to_tensor(image, name="image")
843    orig_dtype = image.dtype
844    flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
845    saturation_adjusted_image = gen_image_ops.adjust_saturation(
846        flt_image, saturation_factor)
847    return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
848
849  def testHalfSaturationFused(self):
850    x_shape = [2, 2, 3]
851    x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
852    x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
853
854    saturation_factor = 0.5
855    y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
856    y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
857
858    with self.test_session(use_gpu=True):
859      x = constant_op.constant(x_np, shape=x_shape)
860      y = self._adjust_saturation(x, saturation_factor)
861      y_tf = y.eval()
862      self.assertAllEqual(y_tf, y_np)
863
864  def testTwiceSaturationFused(self):
865    x_shape = [2, 2, 3]
866    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
867    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
868
869    saturation_factor = 2.0
870    y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
871    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
872
873    with self.test_session(use_gpu=True):
874      x = constant_op.constant(x_np, shape=x_shape)
875      y = self._adjust_saturation(x, saturation_factor)
876      y_tf = y.eval()
877      self.assertAllEqual(y_tf, y_np)
878
879  def _adjustSaturationNp(self, x_np, scale):
880    self.assertEqual(x_np.shape[-1], 3)
881    x_v = x_np.reshape([-1, 3])
882    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
883    channel_count = x_v.shape[0]
884    for i in xrange(channel_count):
885      r = x_v[i][0]
886      g = x_v[i][1]
887      b = x_v[i][2]
888      h, s, v = colorsys.rgb_to_hsv(r, g, b)
889      s *= scale
890      s = min(1.0, max(0.0, s))
891      r, g, b = colorsys.hsv_to_rgb(h, s, v)
892      y_v[i][0] = r
893      y_v[i][1] = g
894      y_v[i][2] = b
895    return y_v.reshape(x_np.shape)
896
897  def testAdjustRandomSaturation(self):
898    x_shapes = [
899        [2, 2, 3],
900        [4, 2, 3],
901        [2, 4, 3],
902        [2, 5, 3],
903        [1000, 1, 3],
904    ]
905    test_styles = [
906        "all_random",
907        "rg_same",
908        "rb_same",
909        "gb_same",
910        "rgb_same",
911    ]
912    with self.test_session(use_gpu=True):
913      for x_shape in x_shapes:
914        for test_style in test_styles:
915          x_np = np.random.rand(*x_shape) * 255.
916          scale = np.random.rand()
917          if test_style == "all_random":
918            pass
919          elif test_style == "rg_same":
920            x_np[..., 1] = x_np[..., 0]
921          elif test_style == "rb_same":
922            x_np[..., 2] = x_np[..., 0]
923          elif test_style == "gb_same":
924            x_np[..., 2] = x_np[..., 1]
925          elif test_style == "rgb_same":
926            x_np[..., 1] = x_np[..., 0]
927            x_np[..., 2] = x_np[..., 0]
928          else:
929            raise AssertionError("Invalid test style: %s" % (test_style))
930          y_baseline = self._adjustSaturationNp(x_np, scale)
931          y_fused = self._adjust_saturation(x_np, scale).eval()
932          self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
933
934
935class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
936
937  def testInvolutionLeftRight(self):
938    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
939    with self.test_session(use_gpu=True):
940      x_tf = constant_op.constant(x_np, shape=x_np.shape)
941      y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
942      y_tf = y.eval()
943      self.assertAllEqual(y_tf, x_np)
944
945  def testInvolutionLeftRightWithBatch(self):
946    x_np = np.array([[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
947                    dtype=np.uint8).reshape([2, 2, 3, 1])
948    with self.test_session(use_gpu=True):
949      x_tf = constant_op.constant(x_np, shape=x_np.shape)
950      y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
951      y_tf = y.eval()
952      self.assertAllEqual(y_tf, x_np)
953
954  def testLeftRight(self):
955    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
956    y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
957
958    with self.test_session(use_gpu=True):
959      x_tf = constant_op.constant(x_np, shape=x_np.shape)
960      y = image_ops.flip_left_right(x_tf)
961      self.assertTrue(y.op.name.startswith("flip_left_right"))
962      y_tf = y.eval()
963      self.assertAllEqual(y_tf, y_np)
964
965  def testLeftRightWithBatch(self):
966    x_np = np.array([[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
967                    dtype=np.uint8).reshape([2, 2, 3, 1])
968    y_np = np.array([[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
969                    dtype=np.uint8).reshape([2, 2, 3, 1])
970
971    with self.test_session(use_gpu=True):
972      x_tf = constant_op.constant(x_np, shape=x_np.shape)
973      y = image_ops.flip_left_right(x_tf)
974      y_tf = y.eval()
975      self.assertAllEqual(y_tf, y_np)
976
977
978  def testRandomFlipLeftRight(self):
979    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
980    y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
981    seed = 42
982
983    with self.test_session(use_gpu=True):
984      x_tf = constant_op.constant(x_np, shape=x_np.shape)
985      y = image_ops.random_flip_left_right(x_tf)
986      self.assertTrue(y.op.name.startswith("random_flip_left_right"))
987
988      count_flipped = 0
989      count_unflipped = 0
990      for _ in range(100):
991        y_tf = y.eval()
992        if y_tf[0][0] == 1:
993          self.assertAllEqual(y_tf, x_np)
994          count_unflipped += 1
995        else:
996          self.assertAllEqual(y_tf, y_np)
997          count_flipped += 1
998
999      # 100 trials
1000      # Mean: 50
1001      # Std Dev: ~5
1002      # Six Sigma: 50 - (5 * 6) = 20
1003      self.assertGreaterEqual(count_flipped, 20)
1004      self.assertGreaterEqual(count_unflipped, 20)
1005
1006  def testInvolutionUpDown(self):
1007    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
1008
1009    with self.test_session(use_gpu=True):
1010      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1011      y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
1012      y_tf = y.eval()
1013      self.assertAllEqual(y_tf, x_np)
1014
1015  def testInvolutionUpDownWithBatch(self):
1016    x_np = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
1017                    dtype=np.uint8).reshape([2, 2, 3, 1])
1018
1019    with self.test_session(use_gpu=True):
1020      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1021      y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
1022      y_tf = y.eval()
1023      self.assertAllEqual(y_tf, x_np)
1024
1025  def testUpDown(self):
1026    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
1027    y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
1028
1029    with self.test_session(use_gpu=True):
1030      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1031      y = image_ops.flip_up_down(x_tf)
1032      self.assertTrue(y.op.name.startswith("flip_up_down"))
1033      y_tf = y.eval()
1034      self.assertAllEqual(y_tf, y_np)
1035
1036  def testUpDownWithBatch(self):
1037    x_np = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
1038                    dtype=np.uint8).reshape([2, 2, 3, 1])
1039    y_np = np.array([[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
1040                    dtype=np.uint8).reshape([2, 2, 3, 1])
1041
1042    with self.test_session(use_gpu=True):
1043      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1044      y = image_ops.flip_up_down(x_tf)
1045      y_tf = y.eval()
1046      self.assertAllEqual(y_tf, y_np)
1047
1048  def testRandomFlipUpDown(self):
1049    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
1050    y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
1051
1052    with self.test_session(use_gpu=True):
1053      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1054      y = image_ops.random_flip_up_down(x_tf, seed=42)
1055      self.assertTrue(y.op.name.startswith("random_flip_up_down"))
1056      count_flipped = 0
1057      count_unflipped = 0
1058      for _ in range(100):
1059        y_tf = y.eval()
1060        if y_tf[0][0] == 1:
1061          self.assertAllEqual(y_tf, x_np)
1062          count_unflipped += 1
1063        else:
1064          self.assertAllEqual(y_tf, y_np)
1065          count_flipped += 1
1066
1067      # 100 trials
1068      # Mean: 50
1069      # Std Dev: ~5
1070      # Six Sigma: 50 - (5 * 6) = 20
1071      self.assertGreaterEqual(count_flipped, 20)
1072      self.assertGreaterEqual(count_unflipped, 20)
1073
1074  def testInvolutionTranspose(self):
1075    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
1076
1077    with self.test_session(use_gpu=True):
1078      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1079      y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
1080      y_tf = y.eval()
1081      self.assertAllEqual(y_tf, x_np)
1082
1083  def testInvolutionTransposeWithBatch(self):
1084    x_np = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
1085                    dtype=np.uint8).reshape([2, 2, 3, 1])
1086
1087    with self.test_session(use_gpu=True):
1088      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1089      y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
1090      y_tf = y.eval()
1091      self.assertAllEqual(y_tf, x_np)
1092
1093  def testTranspose(self):
1094    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
1095    y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
1096
1097    with self.test_session(use_gpu=True):
1098      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1099      y = image_ops.transpose_image(x_tf)
1100      self.assertTrue(y.op.name.startswith("transpose_image"))
1101      y_tf = y.eval()
1102      self.assertAllEqual(y_tf, y_np)
1103
1104  def testTransposeWithBatch(self):
1105    x_np = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
1106                    dtype=np.uint8).reshape([2, 2, 3, 1])
1107
1108    y_np = np.array([[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
1109                    dtype=np.uint8).reshape([2, 3, 2, 1])
1110
1111    with self.test_session(use_gpu=True):
1112      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1113      y = image_ops.transpose_image(x_tf)
1114      y_tf = y.eval()
1115      self.assertAllEqual(y_tf, y_np)
1116
1117  def testPartialShapes(self):
1118    p_unknown_rank = array_ops.placeholder(dtypes.uint8)
1119    p_unknown_dims_3 = array_ops.placeholder(
1120        dtypes.uint8, shape=[None, None, None])
1121    p_unknown_dims_4 = array_ops.placeholder(
1122        dtypes.uint8, shape=[None, None, None, None])
1123    p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
1124    p_unknown_batch = array_ops.placeholder(dtypes.uint8,
1125                                            shape=[None, 64, 64, 3])
1126    p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
1127    p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
1128
1129    #Ops that support 3D input
1130    for op in [
1131        image_ops.flip_left_right, image_ops.flip_up_down,
1132        image_ops.random_flip_left_right, image_ops.random_flip_up_down,
1133        image_ops.transpose_image, image_ops.rot90
1134    ]:
1135      transformed_unknown_rank = op(p_unknown_rank)
1136      self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
1137      transformed_unknown_dims_3 = op(p_unknown_dims_3)
1138      self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
1139      transformed_unknown_width = op(p_unknown_width)
1140      self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
1141
1142      with self.assertRaisesRegexp(ValueError, "must be > 0"):
1143        op(p_zero_dim)
1144
1145    #Ops that support 4D input
1146    for op in [
1147        image_ops.flip_left_right, image_ops.flip_up_down,
1148        image_ops.transpose_image, image_ops.rot90
1149    ]:
1150      transformed_unknown_dims_4 = op(p_unknown_dims_4)
1151      self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
1152      transformed_unknown_batch = op(p_unknown_batch)
1153      self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
1154      with self.assertRaisesRegexp(ValueError,
1155                                   "must be at least three-dimensional"):
1156        op(p_wrong_rank)
1157
1158    for op in [
1159        image_ops.random_flip_left_right, image_ops.random_flip_up_down,
1160    ]:
1161      with self.assertRaisesRegexp(ValueError, "must be three-dimensional"):
1162        op(p_wrong_rank)
1163
1164
1165  def testRot90GroupOrder(self):
1166    image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
1167    with self.test_session(use_gpu=True):
1168      rotated = image
1169      for _ in xrange(4):
1170        rotated = image_ops.rot90(rotated)
1171      self.assertAllEqual(image, rotated.eval())
1172
1173  def testRot90GroupOrderWithBatch(self):
1174    image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
1175    with self.test_session(use_gpu=True):
1176      rotated = image
1177      for _ in xrange(4):
1178        rotated = image_ops.rot90(rotated)
1179      self.assertAllEqual(image, rotated.eval())
1180
1181  def testRot90NumpyEquivalence(self):
1182    image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
1183    with self.test_session(use_gpu=True):
1184      k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
1185      y_tf = image_ops.rot90(image, k_placeholder)
1186      for k in xrange(4):
1187        y_np = np.rot90(image, k=k)
1188        self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
1189
1190  def testRot90NumpyEquivalenceWithBatch(self):
1191    image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
1192    with self.test_session(use_gpu=True):
1193      k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
1194      y_tf = image_ops.rot90(image, k_placeholder)
1195      for k in xrange(4):
1196        y_np = np.rot90(image, k=k, axes=(1, 2))
1197        self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
1198
1199class RandomFlipTest(test_util.TensorFlowTestCase):
1200
1201  def testRandomLeftRight(self):
1202    x_np = np.array([0, 1], dtype=np.uint8).reshape([1, 2, 1])
1203    num_iterations = 500
1204
1205    hist = [0, 0]
1206    with self.test_session(use_gpu=True):
1207      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1208      y = image_ops.random_flip_left_right(x_tf)
1209      for _ in xrange(num_iterations):
1210        y_np = y.eval().flatten()[0]
1211        hist[y_np] += 1
1212
1213    # Ensure that each entry is observed within 4 standard deviations.
1214    four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
1215    self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
1216
1217  def testRandomUpDown(self):
1218    x_np = np.array([0, 1], dtype=np.uint8).reshape([2, 1, 1])
1219    num_iterations = 500
1220
1221    hist = [0, 0]
1222    with self.test_session(use_gpu=True):
1223      x_tf = constant_op.constant(x_np, shape=x_np.shape)
1224      y = image_ops.random_flip_up_down(x_tf)
1225      for _ in xrange(num_iterations):
1226        y_np = y.eval().flatten()[0]
1227        hist[y_np] += 1
1228
1229    # Ensure that each entry is observed within 4 standard deviations.
1230    four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
1231    self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
1232
1233
1234class AdjustContrastTest(test_util.TensorFlowTestCase):
1235
1236  def _testContrast(self, x_np, y_np, contrast_factor):
1237    with self.test_session(use_gpu=True):
1238      x = constant_op.constant(x_np, shape=x_np.shape)
1239      y = image_ops.adjust_contrast(x, contrast_factor)
1240      y_tf = y.eval()
1241      self.assertAllClose(y_tf, y_np, 1e-6)
1242
1243  def testDoubleContrastUint8(self):
1244    x_shape = [1, 2, 2, 3]
1245    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
1246    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
1247
1248    y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
1249    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
1250
1251    self._testContrast(x_np, y_np, contrast_factor=2.0)
1252
1253  def testDoubleContrastFloat(self):
1254    x_shape = [1, 2, 2, 3]
1255    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
1256    x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.
1257
1258    y_data = [
1259        -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
1260        134.75, 409.25, -116.5
1261    ]
1262    y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.
1263
1264    self._testContrast(x_np, y_np, contrast_factor=2.0)
1265
1266  def testHalfContrastUint8(self):
1267    x_shape = [1, 2, 2, 3]
1268    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
1269    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
1270
1271    y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
1272    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
1273
1274    self._testContrast(x_np, y_np, contrast_factor=0.5)
1275
1276  def testBatchDoubleContrast(self):
1277    x_shape = [2, 1, 2, 3]
1278    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
1279    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
1280
1281    y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
1282    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
1283
1284    self._testContrast(x_np, y_np, contrast_factor=2.0)
1285
1286  def _adjustContrastNp(self, x_np, contrast_factor):
1287    mean = np.mean(x_np, (1, 2), keepdims=True)
1288    y_np = mean + contrast_factor * (x_np - mean)
1289    return y_np
1290
1291  def _adjustContrastTf(self, x_np, contrast_factor):
1292    with self.test_session(use_gpu=True):
1293      x = constant_op.constant(x_np)
1294      y = image_ops.adjust_contrast(x, contrast_factor)
1295      y_tf = y.eval()
1296    return y_tf
1297
1298  def testRandomContrast(self):
1299    x_shapes = [
1300        [1, 2, 2, 3],
1301        [2, 1, 2, 3],
1302        [1, 2, 2, 3],
1303        [2, 5, 5, 3],
1304        [2, 1, 1, 3],
1305    ]
1306    for x_shape in x_shapes:
1307      x_np = np.random.rand(*x_shape) * 255.
1308      contrast_factor = np.random.rand() * 2.0 + 0.1
1309      y_np = self._adjustContrastNp(x_np, contrast_factor)
1310      y_tf = self._adjustContrastTf(x_np, contrast_factor)
1311      self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
1312
1313
1314class AdjustBrightnessTest(test_util.TensorFlowTestCase):
1315
1316  def _testBrightness(self, x_np, y_np, delta):
1317    with self.test_session(use_gpu=True):
1318      x = constant_op.constant(x_np, shape=x_np.shape)
1319      y = image_ops.adjust_brightness(x, delta)
1320      y_tf = y.eval()
1321      self.assertAllClose(y_tf, y_np, 1e-6)
1322
1323  def testPositiveDeltaUint8(self):
1324    x_shape = [2, 2, 3]
1325    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
1326    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
1327
1328    y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
1329    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
1330
1331    self._testBrightness(x_np, y_np, delta=10. / 255.)
1332
1333  def testPositiveDeltaFloat(self):
1334    x_shape = [2, 2, 3]
1335    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
1336    x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
1337
1338    y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
1339    y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
1340
1341    self._testBrightness(x_np, y_np, delta=10. / 255.)
1342
1343  def testNegativeDelta(self):
1344    x_shape = [2, 2, 3]
1345    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
1346    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
1347
1348    y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
1349    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
1350
1351    self._testBrightness(x_np, y_np, delta=-10. / 255.)
1352
1353
1354class PerImageWhiteningTest(test_util.TensorFlowTestCase):
1355
1356  def _NumpyPerImageWhitening(self, x):
1357    num_pixels = np.prod(x.shape)
1358    x2 = np.square(x).astype(np.float32)
1359    mn = np.mean(x)
1360    vr = np.mean(x2) - (mn * mn)
1361    stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
1362
1363    y = x.astype(np.float32)
1364    y -= mn
1365    y /= stddev
1366    return y
1367
1368  def testBasic(self):
1369    x_shape = [13, 9, 3]
1370    x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
1371    y_np = self._NumpyPerImageWhitening(x_np)
1372
1373    with self.test_session(use_gpu=True):
1374      x = constant_op.constant(x_np, shape=x_shape)
1375      y = image_ops.per_image_standardization(x)
1376      self.assertTrue(y.op.name.startswith("per_image_standardization"))
1377      y_tf = y.eval()
1378      self.assertAllClose(y_tf, y_np, atol=1e-4)
1379
1380  def testUniformImage(self):
1381    im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
1382    im = constant_op.constant(im_np)
1383    whiten = image_ops.per_image_standardization(im)
1384    with self.test_session(use_gpu=True):
1385      whiten_np = whiten.eval()
1386      self.assertFalse(np.any(np.isnan(whiten_np)))
1387
1388
1389class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
1390
1391  def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
1392                         target_width, use_tensor_inputs):
1393    if use_tensor_inputs:
1394      offset_height = ops.convert_to_tensor(offset_height)
1395      offset_width = ops.convert_to_tensor(offset_width)
1396      target_height = ops.convert_to_tensor(target_height)
1397      target_width = ops.convert_to_tensor(target_width)
1398      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
1399      feed_dict = {x_tensor: x}
1400    else:
1401      x_tensor = x
1402      feed_dict = {}
1403
1404    y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
1405                                       target_height, target_width)
1406    if not use_tensor_inputs:
1407      self.assertTrue(y.get_shape().is_fully_defined())
1408
1409    with self.test_session(use_gpu=True):
1410      return y.eval(feed_dict=feed_dict)
1411
1412  def _assertReturns(self,
1413                     x,
1414                     x_shape,
1415                     offset_height,
1416                     offset_width,
1417                     y,
1418                     y_shape,
1419                     use_tensor_inputs_options=None):
1420    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
1421    target_height, target_width, _ = y_shape
1422    x = np.array(x).reshape(x_shape)
1423    y = np.array(y).reshape(y_shape)
1424
1425    for use_tensor_inputs in use_tensor_inputs_options:
1426      y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
1427                                     target_height, target_width,
1428                                     use_tensor_inputs)
1429      self.assertAllClose(y, y_tf)
1430
1431  def _assertRaises(self,
1432                    x,
1433                    x_shape,
1434                    offset_height,
1435                    offset_width,
1436                    target_height,
1437                    target_width,
1438                    err_msg,
1439                    use_tensor_inputs_options=None):
1440    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
1441    x = np.array(x).reshape(x_shape)
1442
1443    for use_tensor_inputs in use_tensor_inputs_options:
1444      try:
1445        self._CropToBoundingBox(x, offset_height, offset_width, target_height,
1446                                target_width, use_tensor_inputs)
1447      except Exception as e:
1448        if err_msg not in str(e):
1449          raise
1450      else:
1451        raise AssertionError("Exception not raised: %s" % err_msg)
1452
1453  def _assertShapeInference(self, pre_shape, height, width, post_shape):
1454    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
1455    y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
1456    self.assertEqual(y.get_shape().as_list(), post_shape)
1457
1458  def testNoOp(self):
1459    x_shape = [10, 10, 10]
1460    x = np.random.uniform(size=x_shape)
1461    self._assertReturns(x, x_shape, 0, 0, x, x_shape)
1462
1463  def testCrop(self):
1464    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
1465    x_shape = [3, 3, 1]
1466
1467    offset_height, offset_width = [1, 0]
1468    y_shape = [2, 3, 1]
1469    y = [4, 5, 6, 7, 8, 9]
1470    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1471
1472    offset_height, offset_width = [0, 1]
1473    y_shape = [3, 2, 1]
1474    y = [2, 3, 5, 6, 8, 9]
1475    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1476
1477    offset_height, offset_width = [0, 0]
1478    y_shape = [2, 3, 1]
1479    y = [1, 2, 3, 4, 5, 6]
1480    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1481
1482    offset_height, offset_width = [0, 0]
1483    y_shape = [3, 2, 1]
1484    y = [1, 2, 4, 5, 7, 8]
1485    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1486
1487  def testShapeInference(self):
1488    self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
1489    self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
1490    self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
1491    self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
1492    self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
1493    self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
1494    self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
1495    self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
1496    self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
1497    self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
1498    self._assertShapeInference(None, 55, 66, [55, 66, None])
1499
1500  def testNon3DInput(self):
1501    # Input image is not 3D
1502    x = [0] * 15
1503    offset_height, offset_width = [0, 0]
1504    target_height, target_width = [2, 2]
1505
1506    for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
1507      self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
1508                         target_width,
1509                         "'image' must have either 3 or 4 dimensions.")
1510
1511  def testZeroLengthInput(self):
1512    # Input image has 0-length dimension(s).
1513    # Each line is a test configuration:
1514    #   x_shape, target_height, target_width
1515    test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
1516                   ([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
1517    offset_height, offset_width = [0, 0]
1518    x = []
1519
1520    for x_shape, target_height, target_width in test_config:
1521      self._assertRaises(
1522          x,
1523          x_shape,
1524          offset_height,
1525          offset_width,
1526          target_height,
1527          target_width,
1528          "all dims of 'image.shape' must be > 0",
1529          use_tensor_inputs_options=[False])
1530      # Multiple assertion could fail, but the evaluation order is arbitrary.
1531      # Match gainst generic pattern.
1532      self._assertRaises(
1533          x,
1534          x_shape,
1535          offset_height,
1536          offset_width,
1537          target_height,
1538          target_width,
1539          "assertion failed:",
1540          use_tensor_inputs_options=[True])
1541
1542  def testBadParams(self):
1543    x_shape = [4, 4, 1]
1544    x = np.zeros(x_shape)
1545
1546    # Each line is a test configuration:
1547    #   (offset_height, offset_width, target_height, target_width), err_msg
1548    test_config = (([-1, 0, 3, 3], "offset_height must be >= 0"), ([
1549        0, -1, 3, 3
1550    ], "offset_width must be >= 0"), ([0, 0, 0, 3],
1551                                      "target_height must be > 0"),
1552                   ([0, 0, 3, 0], "target_width must be > 0"),
1553                   ([2, 0, 3, 3], "height must be >= target + offset"),
1554                   ([0, 2, 3, 3], "width must be >= target + offset"))
1555
1556    for params, err_msg in test_config:
1557      self._assertRaises(x, x_shape, *params, err_msg=err_msg)
1558
1559  def testNameScope(self):
1560    image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
1561    y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
1562    self.assertTrue(y.name.startswith("crop_to_bounding_box"))
1563
1564
1565class CentralCropTest(test_util.TensorFlowTestCase):
1566
1567  def _assertShapeInference(self, pre_shape, fraction, post_shape):
1568    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
1569    y = image_ops.central_crop(image, fraction)
1570    if post_shape is None:
1571      self.assertEqual(y.get_shape().dims, None)
1572    else:
1573      self.assertEqual(y.get_shape().as_list(), post_shape)
1574
1575  def testNoOp(self):
1576    x_shape = [13, 9, 3]
1577    x_np = np.ones(x_shape, dtype=np.float32)
1578    with self.test_session(use_gpu=True):
1579      x = constant_op.constant(x_np, shape=x_shape)
1580      y = image_ops.central_crop(x, 1.0)
1581      y_tf = y.eval()
1582      self.assertAllEqual(y_tf, x_np)
1583      self.assertEqual(y.op.name, x.op.name)
1584
1585  def testCropping(self):
1586    x_shape = [4, 8, 1]
1587    x_np = np.array(
1588        [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
1589         [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
1590        dtype=np.int32).reshape(x_shape)
1591    y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
1592    with self.test_session(use_gpu=True):
1593      x = constant_op.constant(x_np, shape=x_shape)
1594      y = image_ops.central_crop(x, 0.5)
1595      y_tf = y.eval()
1596      self.assertAllEqual(y_tf, y_np)
1597      self.assertAllEqual(y_tf.shape, y_np.shape)
1598
1599  def testCropping2(self):
1600    # Test case for 10315
1601    x_shape = [240, 320, 3]
1602    x_np = np.zeros(x_shape, dtype=np.int32)
1603    y_np = np.zeros([80, 106, 3], dtype=np.int32)
1604    with self.test_session(use_gpu=True):
1605      x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32)
1606      y = image_ops.central_crop(x, 0.33)
1607      y_tf = y.eval(feed_dict={x: x_np})
1608      self.assertAllEqual(y_tf, y_np)
1609      self.assertAllEqual(y_tf.shape, y_np.shape)
1610
1611  def testShapeInference(self):
1612    # Test no-op fraction=1.0
1613    self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
1614    self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
1615    self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
1616    self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
1617    self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
1618    self._assertShapeInference([None, None, None], 1.0, [None, None, None])
1619    self._assertShapeInference(None, 1.0, None)
1620    # TODO(toddw): Currently central_crop() doesn't infer the result shape even
1621    # when it's possible.  If we change it to do so, we can test as follows:
1622    #
1623    # self._assertShapeInference([50, 60, 3], 0.5, [25, 30, 3])
1624    # self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
1625    # self._assertShapeInference([50, None, 3], 0.5, [25, None, 3])
1626    # self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
1627    # self._assertShapeInference([50, 60, None], 0.5, [25, 30, None])
1628    # self._assertShapeInference([None, None, None], 0.5, [None, None, None])
1629    # self._assertShapeInference(None, 0.5, None)
1630
1631  def testError(self):
1632    x_shape = [13, 9, 3]
1633    x_np = np.ones(x_shape, dtype=np.float32)
1634    with self.test_session(use_gpu=True):
1635      x = constant_op.constant(x_np, shape=x_shape)
1636      with self.assertRaises(ValueError):
1637        _ = image_ops.central_crop(x, 0.0)
1638      with self.assertRaises(ValueError):
1639        _ = image_ops.central_crop(x, 1.01)
1640
1641  def testNameScope(self):
1642    x_shape = [13, 9, 3]
1643    x_np = np.ones(x_shape, dtype=np.float32)
1644    with self.test_session(use_gpu=True):
1645      y = image_ops.central_crop(x_np, 1.0)
1646      self.assertTrue(y.op.name.startswith("central_crop"))
1647
1648
1649class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
1650
1651  def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
1652                        target_width, use_tensor_inputs):
1653    if use_tensor_inputs:
1654      offset_height = ops.convert_to_tensor(offset_height)
1655      offset_width = ops.convert_to_tensor(offset_width)
1656      target_height = ops.convert_to_tensor(target_height)
1657      target_width = ops.convert_to_tensor(target_width)
1658      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
1659      feed_dict = {x_tensor: x}
1660    else:
1661      x_tensor = x
1662      feed_dict = {}
1663
1664    y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
1665                                      target_height, target_width)
1666    if not use_tensor_inputs:
1667      self.assertTrue(y.get_shape().is_fully_defined())
1668
1669    with self.test_session(use_gpu=True):
1670      return y.eval(feed_dict=feed_dict)
1671
1672  def _assertReturns(self,
1673                     x,
1674                     x_shape,
1675                     offset_height,
1676                     offset_width,
1677                     y,
1678                     y_shape,
1679                     use_tensor_inputs_options=None):
1680    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
1681    target_height, target_width, _ = y_shape
1682    x = np.array(x).reshape(x_shape)
1683    y = np.array(y).reshape(y_shape)
1684
1685    for use_tensor_inputs in use_tensor_inputs_options:
1686      y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
1687                                    target_height, target_width,
1688                                    use_tensor_inputs)
1689      self.assertAllClose(y, y_tf)
1690
1691  def _assertRaises(self,
1692                    x,
1693                    x_shape,
1694                    offset_height,
1695                    offset_width,
1696                    target_height,
1697                    target_width,
1698                    err_msg,
1699                    use_tensor_inputs_options=None):
1700    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
1701    x = np.array(x).reshape(x_shape)
1702
1703    for use_tensor_inputs in use_tensor_inputs_options:
1704      try:
1705        self._PadToBoundingBox(x, offset_height, offset_width, target_height,
1706                               target_width, use_tensor_inputs)
1707      except Exception as e:
1708        if err_msg not in str(e):
1709          raise
1710      else:
1711        raise AssertionError("Exception not raised: %s" % err_msg)
1712
1713  def _assertShapeInference(self, pre_shape, height, width, post_shape):
1714    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
1715    y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
1716    self.assertEqual(y.get_shape().as_list(), post_shape)
1717
1718  def testInt64(self):
1719    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
1720    x_shape = [3, 3, 1]
1721
1722    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
1723    y_shape = [4, 3, 1]
1724    x = np.array(x).reshape(x_shape)
1725    y = np.array(y).reshape(y_shape)
1726
1727    i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
1728    y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
1729    with self.test_session(use_gpu=True):
1730      self.assertAllClose(y, y_tf.eval())
1731
1732  def testNoOp(self):
1733    x_shape = [10, 10, 10]
1734    x = np.random.uniform(size=x_shape)
1735    offset_height, offset_width = [0, 0]
1736    self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
1737
1738  def testPadding(self):
1739    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
1740    x_shape = [3, 3, 1]
1741
1742    offset_height, offset_width = [1, 0]
1743    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
1744    y_shape = [4, 3, 1]
1745    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1746
1747    offset_height, offset_width = [0, 1]
1748    y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
1749    y_shape = [3, 4, 1]
1750    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1751
1752    offset_height, offset_width = [0, 0]
1753    y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
1754    y_shape = [4, 3, 1]
1755    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1756
1757    offset_height, offset_width = [0, 0]
1758    y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
1759    y_shape = [3, 4, 1]
1760    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
1761
1762  def testShapeInference(self):
1763    self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
1764    self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
1765    self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
1766    self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
1767    self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
1768    self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
1769    self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
1770    self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
1771    self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
1772    self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
1773    self._assertShapeInference(None, 55, 66, [55, 66, None])
1774
1775  def testNon3DInput(self):
1776    # Input image is not 3D
1777    x = [0] * 15
1778    offset_height, offset_width = [0, 0]
1779    target_height, target_width = [2, 2]
1780
1781    for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
1782      self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
1783                         target_width,
1784                         "'image' must have either 3 or 4 dimensions.")
1785
1786  def testZeroLengthInput(self):
1787    # Input image has 0-length dimension(s).
1788    # Each line is a test configuration:
1789    #   x_shape, target_height, target_width
1790    test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
1791    offset_height, offset_width = [0, 0]
1792    x = []
1793
1794    for x_shape, target_height, target_width in test_config:
1795      self._assertRaises(
1796          x,
1797          x_shape,
1798          offset_height,
1799          offset_width,
1800          target_height,
1801          target_width,
1802          "all dims of 'image.shape' must be > 0",
1803          use_tensor_inputs_options=[False])
1804
1805      # The orignal error message does not contain back slashes. However, they
1806      # are added by either the assert op or the runtime. If this behavior
1807      # changes in the future, the match string will also needs to be changed.
1808      self._assertRaises(
1809          x,
1810          x_shape,
1811          offset_height,
1812          offset_width,
1813          target_height,
1814          target_width,
1815          "all dims of \\'image.shape\\' must be > 0",
1816          use_tensor_inputs_options=[True])
1817
1818  def testBadParams(self):
1819    x_shape = [3, 3, 1]
1820    x = np.zeros(x_shape)
1821
1822    # Each line is a test configuration:
1823    #   offset_height, offset_width, target_height, target_width, err_msg
1824    test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
1825                   (0, -1, 4, 4, "offset_width must be >= 0"),
1826                   (2, 0, 4, 4, "height must be <= target - offset"),
1827                   (0, 2, 4, 4, "width must be <= target - offset"))
1828
1829    for config_item in test_config:
1830      self._assertRaises(x, x_shape, *config_item)
1831
1832  def testNameScope(self):
1833    image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
1834    y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
1835    self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
1836
1837
1838class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
1839
1840  def _testSampleDistortedBoundingBox(self, image, bounding_box,
1841                                      min_object_covered, aspect_ratio_range,
1842                                      area_range):
1843    original_area = float(np.prod(image.shape))
1844    bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
1845                              (bounding_box[2] - bounding_box[0]))
1846
1847    image_size_np = np.array(image.shape, dtype=np.int32)
1848    bounding_box_np = (
1849        np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
1850
1851    aspect_ratios = []
1852    area_ratios = []
1853
1854    fraction_object_covered = []
1855
1856    num_iter = 1000
1857    with self.test_session(use_gpu=True):
1858      image_tf = constant_op.constant(image, shape=image.shape)
1859      image_size_tf = constant_op.constant(
1860          image_size_np, shape=image_size_np.shape)
1861      bounding_box_tf = constant_op.constant(
1862          bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
1863
1864      begin, size, _ = image_ops.sample_distorted_bounding_box(
1865          image_size=image_size_tf,
1866          bounding_boxes=bounding_box_tf,
1867          min_object_covered=min_object_covered,
1868          aspect_ratio_range=aspect_ratio_range,
1869          area_range=area_range)
1870      y = array_ops.strided_slice(image_tf, begin, begin + size)
1871
1872      for _ in xrange(num_iter):
1873        y_tf = y.eval()
1874        crop_height = y_tf.shape[0]
1875        crop_width = y_tf.shape[1]
1876        aspect_ratio = float(crop_width) / float(crop_height)
1877        area = float(crop_width * crop_height)
1878
1879        aspect_ratios.append(aspect_ratio)
1880        area_ratios.append(area / original_area)
1881        fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
1882
1883      # min_object_covered as tensor
1884      min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)
1885      begin, size, _ = image_ops.sample_distorted_bounding_box(
1886          image_size=image_size_tf,
1887          bounding_boxes=bounding_box_tf,
1888          min_object_covered=min_object_covered_placeholder,
1889          aspect_ratio_range=aspect_ratio_range,
1890          area_range=area_range)
1891      y = array_ops.strided_slice(image_tf, begin, begin + size)
1892
1893      for _ in xrange(num_iter):
1894        y_tf = y.eval(feed_dict={
1895            min_object_covered_placeholder: min_object_covered
1896        })
1897        crop_height = y_tf.shape[0]
1898        crop_width = y_tf.shape[1]
1899        aspect_ratio = float(crop_width) / float(crop_height)
1900        area = float(crop_width * crop_height)
1901
1902        aspect_ratios.append(aspect_ratio)
1903        area_ratios.append(area / original_area)
1904        fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
1905
1906    # Ensure that each entry is observed within 3 standard deviations.
1907    # num_bins = 10
1908    # aspect_ratio_hist, _ = np.histogram(aspect_ratios,
1909    #                                     bins=num_bins,
1910    #                                     range=aspect_ratio_range)
1911    # mean = np.mean(aspect_ratio_hist)
1912    # stddev = np.sqrt(mean)
1913    # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
1914    # TODO(irving): Since the rejection probability is not independent of the
1915    # aspect ratio, the aspect_ratio random value is not exactly uniformly
1916    # distributed in [min_aspect_ratio, max_aspect_ratio).  This test should be
1917    # fixed to reflect the true statistical property, then tightened to enforce
1918    # a stricter bound.  Or, ideally, the sample_distorted_bounding_box Op
1919    # be fixed to not use rejection sampling and generate correctly uniform
1920    # aspect ratios.
1921    # self.assertAllClose(aspect_ratio_hist,
1922    #                     [mean] * num_bins, atol=3.6 * stddev)
1923
1924    # The resulting crop will not be uniformly distributed in area. In practice,
1925    # we find that the area skews towards the small sizes. Instead, we perform
1926    # a weaker test to ensure that the area ratios are merely within the
1927    # specified bounds.
1928    self.assertLessEqual(max(area_ratios), area_range[1])
1929    self.assertGreaterEqual(min(area_ratios), area_range[0])
1930
1931    # For reference, here is what the distribution of area ratios look like.
1932    area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
1933    print("area_ratio_hist ", area_ratio_hist)
1934
1935    # Ensure that fraction_object_covered is satisfied.
1936    # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
1937    # self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
1938
1939  def testWholeImageBoundingBox(self):
1940    height = 40
1941    width = 50
1942    image_size = [height, width, 1]
1943    bounding_box = [0.0, 0.0, 1.0, 1.0]
1944    image = np.arange(
1945        0, np.prod(image_size), dtype=np.int32).reshape(image_size)
1946    self._testSampleDistortedBoundingBox(
1947        image,
1948        bounding_box,
1949        min_object_covered=0.1,
1950        aspect_ratio_range=(0.75, 1.33),
1951        area_range=(0.05, 1.0))
1952
1953  def testWithBoundingBox(self):
1954    height = 40
1955    width = 50
1956    x_shape = [height, width, 1]
1957    image = np.zeros(x_shape, dtype=np.int32)
1958
1959    # Create an object with 1's in a region with area A and require that
1960    # the total pixel values >= 0.1 * A.
1961    min_object_covered = 0.1
1962
1963    xmin = 2
1964    ymin = 3
1965    xmax = 12
1966    ymax = 13
1967    for x in np.arange(xmin, xmax + 1, 1):
1968      for y in np.arange(ymin, ymax + 1, 1):
1969        image[x, y] = 1
1970
1971    # Bounding box is specified as (ymin, xmin, ymax, xmax) in
1972    # relative coordinates.
1973    bounding_box = (float(ymin) / height, float(xmin) / width,
1974                    float(ymax) / height, float(xmax) / width)
1975
1976    self._testSampleDistortedBoundingBox(
1977        image,
1978        bounding_box=bounding_box,
1979        min_object_covered=min_object_covered,
1980        aspect_ratio_range=(0.75, 1.33),
1981        area_range=(0.05, 1.0))
1982
1983  def testSampleDistortedBoundingBoxShape(self):
1984    with self.test_session(use_gpu=True):
1985      image_size = constant_op.constant(
1986          [40, 50, 1], shape=[3], dtype=dtypes.int32)
1987      bounding_box = constant_op.constant(
1988          [[[0.0, 0.0, 1.0, 1.0]]],
1989          shape=[1, 1, 4],
1990          dtype=dtypes.float32,
1991      )
1992      begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
1993          image_size=image_size,
1994          bounding_boxes=bounding_box,
1995          min_object_covered=0.1,
1996          aspect_ratio_range=(0.75, 1.33),
1997          area_range=(0.05, 1.0))
1998
1999      # Test that the shapes are correct.
2000      self.assertAllEqual([3], begin.get_shape().as_list())
2001      self.assertAllEqual([3], end.get_shape().as_list())
2002      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
2003      # Actual run to make sure shape is correct inside Compute().
2004      begin = begin.eval()
2005      end = end.eval()
2006      bbox_for_drawing = bbox_for_drawing.eval()
2007
2008      begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
2009          image_size=image_size,
2010          bounding_boxes=bounding_box,
2011          min_object_covered=array_ops.placeholder(dtypes.float32),
2012          aspect_ratio_range=(0.75, 1.33),
2013          area_range=(0.05, 1.0))
2014
2015      # Test that the shapes are correct.
2016      self.assertAllEqual([3], begin.get_shape().as_list())
2017      self.assertAllEqual([3], end.get_shape().as_list())
2018      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
2019
2020  def testDefaultMinObjectCovered(self):
2021    # By default min_object_covered=0.1 if not provided
2022    with self.test_session(use_gpu=True):
2023      image_size = constant_op.constant(
2024          [40, 50, 1], shape=[3], dtype=dtypes.int32)
2025      bounding_box = constant_op.constant(
2026          [[[0.0, 0.0, 1.0, 1.0]]],
2027          shape=[1, 1, 4],
2028          dtype=dtypes.float32,
2029      )
2030      begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
2031          image_size=image_size,
2032          bounding_boxes=bounding_box,
2033          aspect_ratio_range=(0.75, 1.33),
2034          area_range=(0.05, 1.0))
2035
2036      self.assertAllEqual([3], begin.get_shape().as_list())
2037      self.assertAllEqual([3], end.get_shape().as_list())
2038      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
2039      # Actual run to make sure shape is correct inside Compute().
2040      begin = begin.eval()
2041      end = end.eval()
2042      bbox_for_drawing = bbox_for_drawing.eval()
2043
2044
2045class ResizeImagesTest(test_util.TensorFlowTestCase):
2046
2047  OPTIONS = [
2048      image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
2049      image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA
2050  ]
2051
2052  TYPES = [
2053      np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
2054      np.float32, np.float64
2055  ]
2056
2057  def _assertShapeInference(self, pre_shape, size, post_shape):
2058    # Try single image resize
2059    single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
2060    y = image_ops.resize_images(single_image, size)
2061    self.assertEqual(y.get_shape().as_list(), post_shape)
2062    # Try batch images resize with known batch size
2063    images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
2064    y = image_ops.resize_images(images, size)
2065    self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
2066    # Try batch images resize with unknown batch size
2067    images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
2068    y = image_ops.resize_images(images, size)
2069    self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
2070
2071  def shouldRunOnGPU(self, opt, nptype):
2072    if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
2073        nptype in [np.float32, np.float64]):
2074      return True
2075    else:
2076      return False
2077
2078  def testNoOp(self):
2079    img_shape = [1, 6, 4, 1]
2080    single_shape = [6, 4, 1]
2081    # This test is also conducted with int8, so 127 is the maximum
2082    # value that can be used.
2083    data = [
2084        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
2085        50, 50, 100, 100, 50, 50, 100, 100
2086    ]
2087    target_height = 6
2088    target_width = 4
2089
2090    for nptype in self.TYPES:
2091      img_np = np.array(data, dtype=nptype).reshape(img_shape)
2092
2093      for opt in self.OPTIONS:
2094        with self.test_session(use_gpu=True) as sess:
2095          image = constant_op.constant(img_np, shape=img_shape)
2096          y = image_ops.resize_images(image, [target_height, target_width], opt)
2097          yshape = array_ops.shape(y)
2098          resized, newshape = sess.run([y, yshape])
2099          self.assertAllEqual(img_shape, newshape)
2100          self.assertAllClose(resized, img_np, atol=1e-5)
2101
2102      # Resizing with a single image must leave the shape unchanged also.
2103      with self.test_session(use_gpu=True):
2104        img_single = img_np.reshape(single_shape)
2105        image = constant_op.constant(img_single, shape=single_shape)
2106        y = image_ops.resize_images(image, [target_height, target_width],
2107                                    self.OPTIONS[0])
2108        yshape = array_ops.shape(y)
2109        newshape = yshape.eval()
2110        self.assertAllEqual(single_shape, newshape)
2111
2112  def testTensorArguments(self):
2113    img_shape = [1, 6, 4, 1]
2114    single_shape = [6, 4, 1]
2115    # This test is also conducted with int8, so 127 is the maximum
2116    # value that can be used.
2117    data = [
2118        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
2119        50, 50, 100, 100, 50, 50, 100, 100
2120    ]
2121    new_size = array_ops.placeholder(dtypes.int32, shape=(2))
2122
2123    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
2124
2125    for opt in self.OPTIONS:
2126      with self.test_session(use_gpu=True) as sess:
2127        image = constant_op.constant(img_np, shape=img_shape)
2128        y = image_ops.resize_images(image, new_size, opt)
2129        yshape = array_ops.shape(y)
2130        resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
2131        self.assertAllEqual(img_shape, newshape)
2132        self.assertAllClose(resized, img_np, atol=1e-5)
2133
2134    # Resizing with a single image must leave the shape unchanged also.
2135    with self.test_session(use_gpu=True):
2136      img_single = img_np.reshape(single_shape)
2137      image = constant_op.constant(img_single, shape=single_shape)
2138      y = image_ops.resize_images(image, new_size, self.OPTIONS[0])
2139      yshape = array_ops.shape(y)
2140      resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
2141      self.assertAllEqual(single_shape, newshape)
2142      self.assertAllClose(resized, img_single, atol=1e-5)
2143
2144    # Incorrect shape.
2145    with self.assertRaises(ValueError):
2146      new_size = constant_op.constant(4)
2147      _ = image_ops.resize_images(image, new_size,
2148                                  image_ops.ResizeMethod.BILINEAR)
2149    with self.assertRaises(ValueError):
2150      new_size = constant_op.constant([4])
2151      _ = image_ops.resize_images(image, new_size,
2152                                  image_ops.ResizeMethod.BILINEAR)
2153    with self.assertRaises(ValueError):
2154      new_size = constant_op.constant([1, 2, 3])
2155      _ = image_ops.resize_images(image, new_size,
2156                                  image_ops.ResizeMethod.BILINEAR)
2157
2158    # Incorrect dtypes.
2159    with self.assertRaises(ValueError):
2160      new_size = constant_op.constant([6.0, 4])
2161      _ = image_ops.resize_images(image, new_size,
2162                                  image_ops.ResizeMethod.BILINEAR)
2163    with self.assertRaises(ValueError):
2164      _ = image_ops.resize_images(image, [6, 4.0],
2165                                  image_ops.ResizeMethod.BILINEAR)
2166    with self.assertRaises(ValueError):
2167      _ = image_ops.resize_images(image, [None, 4],
2168                                  image_ops.ResizeMethod.BILINEAR)
2169    with self.assertRaises(ValueError):
2170      _ = image_ops.resize_images(image, [6, None],
2171                                  image_ops.ResizeMethod.BILINEAR)
2172
2173  def testReturnDtype(self):
2174    target_shapes = [[6, 4], [3, 2], [
2175        array_ops.placeholder(dtypes.int32),
2176        array_ops.placeholder(dtypes.int32)
2177    ]]
2178    for nptype in self.TYPES:
2179      image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
2180      for opt in self.OPTIONS:
2181        for target_shape in target_shapes:
2182          y = image_ops.resize_images(image, target_shape, opt)
2183          if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR or
2184              target_shape == image.shape[1:3]):
2185            expected_dtype = image.dtype
2186          else:
2187            expected_dtype = dtypes.float32
2188          self.assertEqual(y.dtype, expected_dtype)
2189
2190  def testSumTensor(self):
2191    img_shape = [1, 6, 4, 1]
2192    # This test is also conducted with int8, so 127 is the maximum
2193    # value that can be used.
2194    data = [
2195        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
2196        50, 50, 100, 100, 50, 50, 100, 100
2197    ]
2198    # Test size where width is specified as a tensor which is a sum
2199    # of two tensors.
2200    width_1 = constant_op.constant(1)
2201    width_2 = constant_op.constant(3)
2202    width = math_ops.add(width_1, width_2)
2203    height = constant_op.constant(6)
2204
2205    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
2206
2207    for opt in self.OPTIONS:
2208      with self.test_session() as sess:
2209        image = constant_op.constant(img_np, shape=img_shape)
2210        y = image_ops.resize_images(image, [height, width], opt)
2211        yshape = array_ops.shape(y)
2212        resized, newshape = sess.run([y, yshape])
2213        self.assertAllEqual(img_shape, newshape)
2214        self.assertAllClose(resized, img_np, atol=1e-5)
2215
2216  def testResizeDown(self):
2217    # This test is also conducted with int8, so 127 is the maximum
2218    # value that can be used.
2219    data = [
2220        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
2221        50, 50, 100, 100, 50, 50, 100, 100
2222    ]
2223    expected_data = [127, 64, 64, 127, 50, 100]
2224    target_height = 3
2225    target_width = 2
2226
2227    # Test out 3-D and 4-D image shapes.
2228    img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
2229    target_shapes = [[1, target_height, target_width, 1],
2230                     [target_height, target_width, 1]]
2231
2232    for target_shape, img_shape in zip(target_shapes, img_shapes):
2233
2234      for nptype in self.TYPES:
2235        img_np = np.array(data, dtype=nptype).reshape(img_shape)
2236
2237        for opt in self.OPTIONS:
2238          if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
2239            with self.test_session(use_gpu=True):
2240              image = constant_op.constant(img_np, shape=img_shape)
2241              y = image_ops.resize_images(image, [target_height, target_width],
2242                                          opt)
2243              expected = np.array(expected_data).reshape(target_shape)
2244              resized = y.eval()
2245              self.assertAllClose(resized, expected, atol=1e-5)
2246
2247  def testResizeUpAlignCornersFalse(self):
2248    img_shape = [1, 3, 2, 1]
2249    data = [64, 32, 32, 64, 50, 100]
2250    target_height = 6
2251    target_width = 4
2252    expected_data = {}
2253    expected_data[image_ops.ResizeMethod.BILINEAR] = [
2254        64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
2255        41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
2256        100.0
2257    ]
2258    expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
2259        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
2260        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
2261        100.0
2262    ]
2263    expected_data[image_ops.ResizeMethod.AREA] = [
2264        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
2265        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
2266        100.0
2267    ]
2268
2269    for nptype in self.TYPES:
2270      for opt in [
2271          image_ops.ResizeMethod.BILINEAR,
2272          image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA
2273      ]:
2274        with self.test_session(use_gpu=True):
2275          img_np = np.array(data, dtype=nptype).reshape(img_shape)
2276          image = constant_op.constant(img_np, shape=img_shape)
2277          y = image_ops.resize_images(
2278              image, [target_height, target_width], opt, align_corners=False)
2279          resized = y.eval()
2280          expected = np.array(expected_data[opt]).reshape(
2281              [1, target_height, target_width, 1])
2282          self.assertAllClose(resized, expected, atol=1e-05)
2283
2284  def testResizeUpAlignCornersTrue(self):
2285    img_shape = [1, 3, 2, 1]
2286    data = [6, 3, 3, 6, 6, 9]
2287    target_height = 5
2288    target_width = 4
2289    expected_data = {}
2290    expected_data[image_ops.ResizeMethod.BILINEAR] = [
2291        6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
2292        6.5, 7.5, 6.0, 7.0, 8.0, 9.0
2293    ]
2294    expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
2295        6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
2296        9.0, 9.0, 6.0, 6.0, 9.0, 9.0
2297    ]
2298    # TODO(b/37749740): Improve alignment of ResizeMethod.AREA when
2299    # align_corners=True.
2300    expected_data[image_ops.ResizeMethod.AREA] = [
2301        6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
2302        3.0, 6.0, 6.0, 6.0, 6.0, 9.0
2303    ]
2304
2305    for nptype in self.TYPES:
2306      for opt in [
2307          image_ops.ResizeMethod.BILINEAR,
2308          image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA
2309      ]:
2310        with self.test_session(use_gpu=True):
2311          img_np = np.array(data, dtype=nptype).reshape(img_shape)
2312          image = constant_op.constant(img_np, shape=img_shape)
2313          y = image_ops.resize_images(
2314              image, [target_height, target_width], opt, align_corners=True)
2315          resized = y.eval()
2316          expected = np.array(expected_data[opt]).reshape(
2317              [1, target_height, target_width, 1])
2318          self.assertAllClose(resized, expected, atol=1e-05)
2319
2320  def testResizeUpBicubic(self):
2321    img_shape = [1, 6, 6, 1]
2322    data = [
2323        128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
2324        50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
2325        50, 50, 100, 100
2326    ]
2327    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
2328
2329    target_height = 8
2330    target_width = 8
2331    expected_data = [
2332        128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
2333        55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
2334        105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
2335        75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
2336    ]
2337
2338    with self.test_session(use_gpu=True):
2339      image = constant_op.constant(img_np, shape=img_shape)
2340      y = image_ops.resize_images(image, [target_height, target_width],
2341                                  image_ops.ResizeMethod.BICUBIC)
2342      resized = y.eval()
2343      expected = np.array(expected_data).reshape(
2344          [1, target_height, target_width, 1])
2345      self.assertAllClose(resized, expected, atol=1)
2346
2347  def testResizeDownArea(self):
2348    img_shape = [1, 6, 6, 1]
2349    data = [
2350        128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
2351        10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
2352    ]
2353    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
2354
2355    target_height = 4
2356    target_width = 4
2357    expected_data = [
2358        73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
2359    ]
2360
2361    with self.test_session(use_gpu=True):
2362      image = constant_op.constant(img_np, shape=img_shape)
2363      y = image_ops.resize_images(image, [target_height, target_width],
2364                                  image_ops.ResizeMethod.AREA)
2365      expected = np.array(expected_data).reshape(
2366          [1, target_height, target_width, 1])
2367      resized = y.eval()
2368      self.assertAllClose(resized, expected, atol=1)
2369
2370  def testCompareNearestNeighbor(self):
2371    if test.is_gpu_available():
2372      input_shape = [1, 5, 6, 3]
2373      target_height = 8
2374      target_width = 12
2375      for nptype in [np.float32, np.float64]:
2376        for align_corners in [True, False]:
2377          img_np = np.arange(
2378              0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
2379          with self.test_session(use_gpu=True):
2380            image = constant_op.constant(img_np, shape=input_shape)
2381            new_size = constant_op.constant([target_height, target_width])
2382            out_op = image_ops.resize_images(
2383                image,
2384                new_size,
2385                image_ops.ResizeMethod.NEAREST_NEIGHBOR,
2386                align_corners=align_corners)
2387            gpu_val = out_op.eval()
2388          with self.test_session(use_gpu=False):
2389            image = constant_op.constant(img_np, shape=input_shape)
2390            new_size = constant_op.constant([target_height, target_width])
2391            out_op = image_ops.resize_images(
2392                image,
2393                new_size,
2394                image_ops.ResizeMethod.NEAREST_NEIGHBOR,
2395                align_corners=align_corners)
2396            cpu_val = out_op.eval()
2397          self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
2398
2399  def testCompareBilinear(self):
2400    if test.is_gpu_available():
2401      input_shape = [1, 5, 6, 3]
2402      target_height = 8
2403      target_width = 12
2404      for nptype in [np.float32, np.float64]:
2405        for align_corners in [True, False]:
2406          img_np = np.arange(
2407              0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
2408          value = {}
2409          for use_gpu in [True, False]:
2410            with self.test_session(use_gpu=use_gpu):
2411              image = constant_op.constant(img_np, shape=input_shape)
2412              new_size = constant_op.constant([target_height, target_width])
2413              out_op = image_ops.resize_images(
2414                  image,
2415                  new_size,
2416                  image_ops.ResizeMethod.BILINEAR,
2417                  align_corners=align_corners)
2418              value[use_gpu] = out_op.eval()
2419          self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
2420
2421  def testShapeInference(self):
2422    self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
2423    self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
2424    self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
2425    self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
2426    self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
2427    self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
2428    self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
2429    self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
2430    self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
2431    self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
2432    self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
2433    self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
2434    self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
2435    self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
2436    self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
2437    self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
2438    self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
2439    self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
2440
2441  def testNameScope(self):
2442    img_shape = [1, 3, 2, 1]
2443    with self.test_session(use_gpu=True):
2444      single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
2445      y = image_ops.resize_images(single_image, [55, 66])
2446      self.assertTrue(y.op.name.startswith("resize_images"))
2447
2448
2449class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
2450
2451  def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
2452                                use_tensor_inputs):
2453    if use_tensor_inputs:
2454      target_height = ops.convert_to_tensor(target_height)
2455      target_width = ops.convert_to_tensor(target_width)
2456      x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
2457      feed_dict = {x_tensor: x}
2458    else:
2459      x_tensor = x
2460      feed_dict = {}
2461
2462    y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
2463                                                target_width)
2464    if not use_tensor_inputs:
2465      self.assertTrue(y.get_shape().is_fully_defined())
2466
2467    with self.test_session(use_gpu=True):
2468      return y.eval(feed_dict=feed_dict)
2469
2470  def _assertReturns(self,
2471                     x,
2472                     x_shape,
2473                     y,
2474                     y_shape,
2475                     use_tensor_inputs_options=None):
2476    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
2477    target_height, target_width, _ = y_shape
2478    x = np.array(x).reshape(x_shape)
2479    y = np.array(y).reshape(y_shape)
2480
2481    for use_tensor_inputs in use_tensor_inputs_options:
2482      y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
2483                                            use_tensor_inputs)
2484      self.assertAllClose(y, y_tf)
2485
2486  def _assertRaises(self,
2487                    x,
2488                    x_shape,
2489                    target_height,
2490                    target_width,
2491                    err_msg,
2492                    use_tensor_inputs_options=None):
2493    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
2494    x = np.array(x).reshape(x_shape)
2495
2496    for use_tensor_inputs in use_tensor_inputs_options:
2497      try:
2498        self._ResizeImageWithCropOrPad(x, target_height, target_width,
2499                                       use_tensor_inputs)
2500      except Exception as e:
2501        if err_msg not in str(e):
2502          raise
2503      else:
2504        raise AssertionError("Exception not raised: %s" % err_msg)
2505
2506  def _assertShapeInference(self, pre_shape, height, width, post_shape):
2507    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
2508    y = image_ops.resize_image_with_crop_or_pad(image, height, width)
2509    self.assertEqual(y.get_shape().as_list(), post_shape)
2510
2511  def testNoOp(self):
2512    x_shape = [10, 10, 10]
2513    x = np.random.uniform(size=x_shape)
2514
2515    self._assertReturns(x, x_shape, x, x_shape)
2516
2517  def testPad(self):
2518    # Pad even along col.
2519    x = [1, 2, 3, 4, 5, 6, 7, 8]
2520    x_shape = [2, 4, 1]
2521
2522    y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
2523    y_shape = [2, 6, 1]
2524
2525    self._assertReturns(x, x_shape, y, y_shape)
2526
2527    # Pad odd along col.
2528    x = [1, 2, 3, 4, 5, 6, 7, 8]
2529    x_shape = [2, 4, 1]
2530
2531    y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
2532    y_shape = [2, 7, 1]
2533
2534    self._assertReturns(x, x_shape, y, y_shape)
2535
2536    # Pad even along row.
2537    x = [1, 2, 3, 4, 5, 6, 7, 8]
2538    x_shape = [2, 4, 1]
2539
2540    y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
2541    y_shape = [4, 4, 1]
2542
2543    self._assertReturns(x, x_shape, y, y_shape)
2544
2545    # Pad odd along row.
2546    x = [1, 2, 3, 4, 5, 6, 7, 8]
2547    x_shape = [2, 4, 1]
2548
2549    y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
2550    y_shape = [5, 4, 1]
2551
2552    self._assertReturns(x, x_shape, y, y_shape)
2553
2554  def testCrop(self):
2555    # Crop even along col.
2556    x = [1, 2, 3, 4, 5, 6, 7, 8]
2557    x_shape = [2, 4, 1]
2558
2559    y = [2, 3, 6, 7]
2560    y_shape = [2, 2, 1]
2561
2562    self._assertReturns(x, x_shape, y, y_shape)
2563
2564    # Crop odd along col.
2565    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
2566    x_shape = [2, 6, 1]
2567
2568    y = [2, 3, 4, 8, 9, 10]
2569    y_shape = [2, 3, 1]
2570
2571    self._assertReturns(x, x_shape, y, y_shape)
2572
2573    # Crop even along row.
2574    x = [1, 2, 3, 4, 5, 6, 7, 8]
2575    x_shape = [4, 2, 1]
2576
2577    y = [3, 4, 5, 6]
2578    y_shape = [2, 2, 1]
2579
2580    self._assertReturns(x, x_shape, y, y_shape)
2581
2582    # Crop odd along row.
2583    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
2584    x_shape = [8, 2, 1]
2585
2586    y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
2587    y_shape = [5, 2, 1]
2588
2589    self._assertReturns(x, x_shape, y, y_shape)
2590
2591  def testCropAndPad(self):
2592    # Pad along row but crop along col.
2593    x = [1, 2, 3, 4, 5, 6, 7, 8]
2594    x_shape = [2, 4, 1]
2595
2596    y = [0, 0, 2, 3, 6, 7, 0, 0]
2597    y_shape = [4, 2, 1]
2598
2599    self._assertReturns(x, x_shape, y, y_shape)
2600
2601    # Crop along row but pad along col.
2602    x = [1, 2, 3, 4, 5, 6, 7, 8]
2603    x_shape = [4, 2, 1]
2604
2605    y = [0, 3, 4, 0, 0, 5, 6, 0]
2606    y_shape = [2, 4, 1]
2607
2608    self._assertReturns(x, x_shape, y, y_shape)
2609
2610  def testShapeInference(self):
2611    self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
2612    self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
2613    self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
2614    self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
2615    self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
2616    self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
2617    self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
2618    self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
2619    self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
2620    self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
2621    self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
2622    self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
2623    self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
2624    self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
2625    self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
2626    self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
2627    self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
2628    self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
2629    self._assertShapeInference(None, 55, 66, [55, 66, None])
2630
2631  def testNon3DInput(self):
2632    # Input image is not 3D
2633    x = [0] * 15
2634    target_height, target_width = [4, 4]
2635
2636    for x_shape in ([3, 5],):
2637      self._assertRaises(x, x_shape, target_height, target_width,
2638                         "'image' must have either 3 or 4 dimensions.")
2639
2640    for x_shape in ([1, 3, 5, 1, 1],):
2641      self._assertRaises(x, x_shape, target_height, target_width,
2642                         "'image' must have either 3 or 4 dimensions.")
2643
2644  def testZeroLengthInput(self):
2645    # Input image has 0-length dimension(s).
2646    target_height, target_width = [1, 1]
2647    x = []
2648
2649    for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
2650      self._assertRaises(
2651          x,
2652          x_shape,
2653          target_height,
2654          target_width,
2655          "all dims of 'image.shape' must be > 0",
2656          use_tensor_inputs_options=[False])
2657
2658      # The orignal error message does not contain back slashes. However, they
2659      # are added by either the assert op or the runtime. If this behavior
2660      # changes in the future, the match string will also needs to be changed.
2661      self._assertRaises(
2662          x,
2663          x_shape,
2664          target_height,
2665          target_width,
2666          "all dims of \\'image.shape\\' must be > 0",
2667          use_tensor_inputs_options=[True])
2668
2669  def testBadParams(self):
2670    x_shape = [4, 4, 1]
2671    x = np.zeros(x_shape)
2672
2673    # target_height <= 0
2674    target_height, target_width = [0, 5]
2675    self._assertRaises(x, x_shape, target_height, target_width,
2676                       "target_height must be > 0")
2677
2678    # target_width <= 0
2679    target_height, target_width = [5, 0]
2680    self._assertRaises(x, x_shape, target_height, target_width,
2681                       "target_width must be > 0")
2682
2683  def testNameScope(self):
2684    image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
2685    y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
2686    self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
2687
2688
2689def _SimpleColorRamp():
2690  """Build a simple color ramp RGB image."""
2691  w, h = 256, 200
2692  i = np.arange(h)[:, None]
2693  j = np.arange(w)
2694  image = np.empty((h, w, 3), dtype=np.uint8)
2695  image[:, :, 0] = i
2696  image[:, :, 1] = j
2697  image[:, :, 2] = (i + j) >> 1
2698  return image
2699
2700
2701class JpegTest(test_util.TensorFlowTestCase):
2702
2703  # TODO(irving): Add self.assertAverageLess or similar to test_util
2704  def averageError(self, image0, image1):
2705    self.assertEqual(image0.shape, image1.shape)
2706    image0 = image0.astype(int)  # Avoid overflow
2707    return np.abs(image0 - image1).sum() / np.prod(image0.shape)
2708
2709  def testExisting(self):
2710    # Read a real jpeg and verify shape
2711    path = ("tensorflow/core/lib/jpeg/testdata/"
2712            "jpeg_merge_test1.jpg")
2713    with self.test_session(use_gpu=True) as sess:
2714      jpeg0 = io_ops.read_file(path)
2715      image0 = image_ops.decode_jpeg(jpeg0)
2716      image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
2717      jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
2718      self.assertEqual(len(jpeg0), 3771)
2719      self.assertEqual(image0.shape, (256, 128, 3))
2720      self.assertLess(self.averageError(image0, image1), 1.4)
2721
2722  def testCmyk(self):
2723    # Confirm that CMYK reads in as RGB
2724    base = "tensorflow/core/lib/jpeg/testdata"
2725    rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
2726    cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
2727    shape = 256, 128, 3
2728    for channels in 3, 0:
2729      with self.test_session(use_gpu=True) as sess:
2730        rgb = image_ops.decode_jpeg(
2731            io_ops.read_file(rgb_path), channels=channels)
2732        cmyk = image_ops.decode_jpeg(
2733            io_ops.read_file(cmyk_path), channels=channels)
2734        rgb, cmyk = sess.run([rgb, cmyk])
2735        self.assertEqual(rgb.shape, shape)
2736        self.assertEqual(cmyk.shape, shape)
2737        error = self.averageError(rgb, cmyk)
2738        self.assertLess(error, 4)
2739
2740  def testCropAndDecodeJpeg(self):
2741    with self.test_session() as sess:
2742      # Encode it, then decode it, then encode it
2743      base = "tensorflow/core/lib/jpeg/testdata"
2744      jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
2745
2746      h, w, _ = 256, 128, 3
2747      crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
2748                      [h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
2749      for crop_window in crop_windows:
2750        # Explicit two stages: decode + crop.
2751        image1 = image_ops.decode_jpeg(jpeg0)
2752        y, x, h, w = crop_window
2753        image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
2754
2755        # Combined decode+crop.
2756        image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
2757
2758        # Combined decode+crop should have the same shape inference
2759        self.assertAllEqual(image1_crop.get_shape().as_list(),
2760                            image2.get_shape().as_list())
2761
2762        # CropAndDecode should be equal to DecodeJpeg+Crop.
2763        image1_crop, image2 = sess.run([image1_crop, image2])
2764        self.assertAllEqual(image1_crop, image2)
2765
2766  def testCropAndDecodeJpegWithInvalidCropWindow(self):
2767    with self.test_session() as sess:
2768      # Encode it, then decode it, then encode it
2769      base = "tensorflow/core/lib/jpeg/testdata"
2770      jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
2771
2772      h, w, _ = 256, 128, 3
2773      # Invalid crop windows.
2774      crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
2775                      [11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
2776                      [0, 0, h + 1, w], [0, 0, h, w + 1]]
2777      for crop_window in crop_windows:
2778        result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
2779        with self.assertRaisesWithPredicateMatch(
2780            errors.InvalidArgumentError,
2781            lambda e: "Invalid JPEG data or crop window" in str(e)):
2782          sess.run(result)
2783
2784  def testSynthetic(self):
2785    with self.test_session(use_gpu=True) as sess:
2786      # Encode it, then decode it, then encode it
2787      image0 = constant_op.constant(_SimpleColorRamp())
2788      jpeg0 = image_ops.encode_jpeg(image0)
2789      image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
2790      image2 = image_ops.decode_jpeg(
2791          image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
2792      jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
2793
2794      # The decoded-encoded image should be similar to the input
2795      self.assertLess(self.averageError(image0, image1), 0.6)
2796
2797      # We should be very close to a fixpoint
2798      self.assertLess(self.averageError(image1, image2), 0.02)
2799
2800      # Smooth ramps compress well (input size is 153600)
2801      self.assertGreaterEqual(len(jpeg0), 5000)
2802      self.assertLessEqual(len(jpeg0), 6000)
2803
2804  def testSyntheticFasterAlgorithm(self):
2805    with self.test_session(use_gpu=True) as sess:
2806      # Encode it, then decode it, then encode it
2807      image0 = constant_op.constant(_SimpleColorRamp())
2808      jpeg0 = image_ops.encode_jpeg(image0)
2809      image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
2810      image2 = image_ops.decode_jpeg(
2811          image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
2812      jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
2813
2814      # The decoded-encoded image should be similar to the input, but
2815      # note this is worse than the slower algorithm because it is
2816      # less accurate.
2817      self.assertLess(self.averageError(image0, image1), 0.95)
2818
2819      # Repeated compression / decompression will have a higher error
2820      # with a lossier algorithm.
2821      self.assertLess(self.averageError(image1, image2), 1.05)
2822
2823      # Smooth ramps compress well (input size is 153600)
2824      self.assertGreaterEqual(len(jpeg0), 5000)
2825      self.assertLessEqual(len(jpeg0), 6000)
2826
2827  def testDefaultDCTMethodIsIntegerFast(self):
2828    with self.test_session(use_gpu=True) as sess:
2829      # Compare decoding with both dct_option=INTEGER_FAST and
2830      # default.  They should be the same.
2831      image0 = constant_op.constant(_SimpleColorRamp())
2832      jpeg0 = image_ops.encode_jpeg(image0)
2833      image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
2834      image2 = image_ops.decode_jpeg(jpeg0)
2835      image1, image2 = sess.run([image1, image2])
2836
2837      # The images should be the same.
2838      self.assertAllClose(image1, image2)
2839
2840  def testShape(self):
2841    with self.test_session(use_gpu=True) as sess:
2842      jpeg = constant_op.constant("nonsense")
2843      for channels in 0, 1, 3:
2844        image = image_ops.decode_jpeg(jpeg, channels=channels)
2845        self.assertEqual(image.get_shape().as_list(),
2846                         [None, None, channels or None])
2847
2848  def testExtractJpegShape(self):
2849    # Read a real jpeg and verify shape.
2850    path = ("tensorflow/core/lib/jpeg/testdata/"
2851            "jpeg_merge_test1.jpg")
2852    with self.test_session(use_gpu=True) as sess:
2853      jpeg = io_ops.read_file(path)
2854      # Extract shape without decoding.
2855      [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
2856      self.assertEqual(image_shape.tolist(), [256, 128, 3])
2857
2858  def testExtractJpegShapeforCmyk(self):
2859    # Read a cmyk jpeg image, and verify its shape.
2860    path = ("tensorflow/core/lib/jpeg/testdata/"
2861            "jpeg_merge_test1_cmyk.jpg")
2862    with self.test_session(use_gpu=True) as sess:
2863      jpeg = io_ops.read_file(path)
2864      [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
2865      # Cmyk jpeg image has 4 channels.
2866      self.assertEqual(image_shape.tolist(), [256, 128, 4])
2867
2868
2869class PngTest(test_util.TensorFlowTestCase):
2870
2871  def testExisting(self):
2872    # Read some real PNGs, converting to different channel numbers
2873    prefix = "tensorflow/core/lib/png/testdata/"
2874    inputs = (1, "lena_gray.png"), (4, "lena_rgba.png")
2875    for channels_in, filename in inputs:
2876      for channels in 0, 1, 3, 4:
2877        with self.test_session(use_gpu=True) as sess:
2878          png0 = io_ops.read_file(prefix + filename)
2879          image0 = image_ops.decode_png(png0, channels=channels)
2880          png0, image0 = sess.run([png0, image0])
2881          self.assertEqual(image0.shape, (26, 51, channels or channels_in))
2882          if channels == channels_in:
2883            image1 = image_ops.decode_png(image_ops.encode_png(image0))
2884            self.assertAllEqual(image0, image1.eval())
2885
2886  def testSynthetic(self):
2887    with self.test_session(use_gpu=True) as sess:
2888      # Encode it, then decode it
2889      image0 = constant_op.constant(_SimpleColorRamp())
2890      png0 = image_ops.encode_png(image0, compression=7)
2891      image1 = image_ops.decode_png(png0)
2892      png0, image0, image1 = sess.run([png0, image0, image1])
2893
2894      # PNG is lossless
2895      self.assertAllEqual(image0, image1)
2896
2897      # Smooth ramps compress well, but not too well
2898      self.assertGreaterEqual(len(png0), 400)
2899      self.assertLessEqual(len(png0), 750)
2900
2901  def testSyntheticUint16(self):
2902    with self.test_session(use_gpu=True) as sess:
2903      # Encode it, then decode it
2904      image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
2905      png0 = image_ops.encode_png(image0, compression=7)
2906      image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
2907      png0, image0, image1 = sess.run([png0, image0, image1])
2908
2909      # PNG is lossless
2910      self.assertAllEqual(image0, image1)
2911
2912      # Smooth ramps compress well, but not too well
2913      self.assertGreaterEqual(len(png0), 800)
2914      self.assertLessEqual(len(png0), 1500)
2915
2916  def testSyntheticTwoChannel(self):
2917    with self.test_session(use_gpu=True) as sess:
2918      # Strip the b channel from an rgb image to get a two-channel image.
2919      gray_alpha = _SimpleColorRamp()[:, :, 0:2]
2920      image0 = constant_op.constant(gray_alpha)
2921      png0 = image_ops.encode_png(image0, compression=7)
2922      image1 = image_ops.decode_png(png0)
2923      png0, image0, image1 = sess.run([png0, image0, image1])
2924      self.assertEqual(2, image0.shape[-1])
2925      self.assertAllEqual(image0, image1)
2926
2927  def testSyntheticTwoChannelUint16(self):
2928    with self.test_session(use_gpu=True) as sess:
2929      # Strip the b channel from an rgb image to get a two-channel image.
2930      gray_alpha = _SimpleColorRamp()[:, :, 0:2]
2931      image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
2932      png0 = image_ops.encode_png(image0, compression=7)
2933      image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
2934      png0, image0, image1 = sess.run([png0, image0, image1])
2935      self.assertEqual(2, image0.shape[-1])
2936      self.assertAllEqual(image0, image1)
2937
2938  def testShape(self):
2939    with self.test_session(use_gpu=True):
2940      png = constant_op.constant("nonsense")
2941      for channels in 0, 1, 3:
2942        image = image_ops.decode_png(png, channels=channels)
2943        self.assertEqual(image.get_shape().as_list(),
2944                         [None, None, channels or None])
2945
2946
2947class GifTest(test_util.TensorFlowTestCase):
2948
2949  def _testValid(self, filename):
2950    # Read some real GIFs
2951    prefix = "tensorflow/core/lib/gif/testdata/"
2952    WIDTH = 20
2953    HEIGHT = 40
2954    STRIDE = 5
2955    shape = (12, HEIGHT, WIDTH, 3)
2956
2957    with self.test_session(use_gpu=True) as sess:
2958      gif0 = io_ops.read_file(prefix + filename)
2959      image0 = image_ops.decode_gif(gif0)
2960      gif0, image0 = sess.run([gif0, image0])
2961
2962      self.assertEqual(image0.shape, shape)
2963
2964      for frame_idx, frame in enumerate(image0):
2965        gt = np.zeros(shape[1:], dtype=np.uint8)
2966        start = frame_idx * STRIDE
2967        end = (frame_idx + 1) * STRIDE
2968        print(frame_idx)
2969        if end <= WIDTH:
2970          gt[:, start:end, :] = 255
2971        else:
2972          start -= WIDTH
2973          end -= WIDTH
2974          gt[start:end, :, :] = 255
2975
2976        self.assertAllClose(frame, gt)
2977
2978  def testValid(self):
2979    self._testValid("scan.gif")
2980    self._testValid("optimized.gif")
2981
2982  def testShape(self):
2983    with self.test_session(use_gpu=True) as sess:
2984      gif = constant_op.constant("nonsense")
2985      image = image_ops.decode_gif(gif)
2986      self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
2987
2988
2989class ConvertImageTest(test_util.TensorFlowTestCase):
2990
2991  def _convert(self, original, original_dtype, output_dtype, expected):
2992    x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
2993    y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
2994
2995    with self.test_session(use_gpu=True):
2996      image = constant_op.constant(x_np)
2997      y = image_ops.convert_image_dtype(image, output_dtype)
2998      self.assertTrue(y.dtype == output_dtype)
2999      self.assertAllClose(y.eval(), y_np, atol=1e-5)
3000      if output_dtype in [
3001          dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
3002      ]:
3003        y_saturate = image_ops.convert_image_dtype(
3004            image, output_dtype, saturate=True)
3005        self.assertTrue(y_saturate.dtype == output_dtype)
3006        self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)
3007
3008  def testNoConvert(self):
3009    # Make sure converting to the same data type creates only an identity op
3010    with self.test_session(use_gpu=True):
3011      image = constant_op.constant([1], dtype=dtypes.uint8)
3012      image_ops.convert_image_dtype(image, dtypes.uint8)
3013      y = image_ops.convert_image_dtype(image, dtypes.uint8)
3014      self.assertEquals(y.op.type, "Identity")
3015      self.assertEquals(y.op.inputs[0], image)
3016
3017  def testConvertBetweenInteger(self):
3018    # Make sure converting to between integer types scales appropriately
3019    with self.test_session(use_gpu=True):
3020      self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
3021      self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
3022      self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
3023      self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
3024
3025  def testConvertBetweenFloat(self):
3026    # Make sure converting to between float types does nothing interesting
3027    with self.test_session(use_gpu=True):
3028      self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
3029                    [-1.0, 0, 1.0, 200000])
3030      self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
3031                    [-1.0, 0, 1.0, 200000])
3032
3033  def testConvertBetweenIntegerAndFloat(self):
3034    # Make sure converting from and to a float type scales appropriately
3035    with self.test_session(use_gpu=True):
3036      self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
3037                    [0, 1.0 / 255.0, 1])
3038      self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
3039                    [0, 1, 255])
3040
3041  def testConvertBetweenInt16AndInt8(self):
3042    with self.test_session(use_gpu=True):
3043      # uint8, uint16
3044      self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
3045      self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
3046      # int8, uint16
3047      self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
3048      self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
3049      # int16, uint16
3050      self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
3051      self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
3052
3053
3054class TotalVariationTest(test_util.TensorFlowTestCase):
3055  """Tests the function total_variation() in image_ops.
3056
3057  We test a few small handmade examples, as well as
3058  some larger examples using an equivalent numpy
3059  implementation of the total_variation() function.
3060
3061  We do NOT test for overflows and invalid / edge-case arguments.
3062  """
3063
3064  def _test(self, x_np, y_np):
3065    """Test that the TensorFlow implementation of
3066    total_variation(x_np) calculates the values in y_np.
3067
3068    Note that these may be float-numbers so we only test
3069    for approximate equality within some narrow error-bound.
3070    """
3071
3072    # Create a TensorFlow session.
3073    with self.test_session(use_gpu=True):
3074      # Add a constant to the TensorFlow graph that holds the input.
3075      x_tf = constant_op.constant(x_np, shape=x_np.shape)
3076
3077      # Add ops for calculating the total variation using TensorFlow.
3078      y = image_ops.total_variation(images=x_tf)
3079
3080      # Run the TensorFlow session to calculate the result.
3081      y_tf = y.eval()
3082
3083      # Assert that the results are as expected within
3084      # some small error-bound in case they are float-values.
3085      self.assertAllClose(y_tf, y_np)
3086
3087  def _total_variation_np(self, x_np):
3088    """Calculate the total variation of x_np using numpy.
3089    This implements the same function as TensorFlow but
3090    using numpy instead.
3091
3092    Args:
3093        x_np: Numpy array with 3 or 4 dimensions.
3094    """
3095
3096    dim = len(x_np.shape)
3097
3098    if dim == 3:
3099      # Calculate differences for neighboring pixel-values using slices.
3100      dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
3101      dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
3102
3103      # Sum for all axis.
3104      sum_axis = None
3105    elif dim == 4:
3106      # Calculate differences for neighboring pixel-values using slices.
3107      dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
3108      dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
3109
3110      # Only sum for the last 3 axis.
3111      sum_axis = (1, 2, 3)
3112    else:
3113      # This should not occur in this test-code.
3114      pass
3115
3116    tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
3117              np.sum(np.abs(dif2), axis=sum_axis)
3118
3119    return tot_var
3120
3121  def _test_tensorflow_vs_numpy(self, x_np):
3122    """Test the TensorFlow implementation against a numpy implementation.
3123
3124    Args:
3125        x_np: Numpy array with 3 or 4 dimensions.
3126    """
3127
3128    # Calculate the y-values using the numpy implementation.
3129    y_np = self._total_variation_np(x_np)
3130
3131    self._test(x_np, y_np)
3132
3133  def _generateArray(self, shape):
3134    """Generate an array of the given shape for use in testing.
3135    The numbers are calculated as the cumulative sum, which
3136    causes the difference between neighboring numbers to vary."""
3137
3138    # Flattened length of the array.
3139    flat_len = np.prod(shape)
3140
3141    a = np.array(range(flat_len), dtype=int)
3142    a = np.cumsum(a)
3143    a = a.reshape(shape)
3144
3145    return a
3146
3147  def testTotalVariationNumpy(self):
3148    """Test the TensorFlow implementation against a numpy implementation.
3149    The two implementations are very similar so it is possible that both
3150    have the same bug, which would not be detected by this test. It is
3151    therefore necessary to test with manually crafted data as well."""
3152
3153    # Generate a test-array.
3154    # This is an 'image' with 100x80 pixels and 3 color channels.
3155    a = self._generateArray(shape=(100, 80, 3))
3156
3157    # Test the TensorFlow implementation vs. numpy implementation.
3158    # We use a numpy implementation to check the results that are
3159    # calculated using TensorFlow are correct.
3160    self._test_tensorflow_vs_numpy(a)
3161    self._test_tensorflow_vs_numpy(a + 1)
3162    self._test_tensorflow_vs_numpy(-a)
3163    self._test_tensorflow_vs_numpy(1.1 * a)
3164
3165    # Expand to a 4-dim array.
3166    b = a[np.newaxis, :]
3167
3168    # Combine several variations of the image into a single 4-dim array.
3169    multi = np.vstack((b, b + 1, -b, 1.1 * b))
3170
3171    # Test that the TensorFlow function can also handle 4-dim arrays.
3172    self._test_tensorflow_vs_numpy(multi)
3173
3174  def testTotalVariationHandmade(self):
3175    """Test the total variation for a few handmade examples."""
3176
3177    # We create an image that is 2x2 pixels with 3 color channels.
3178    # The image is very small so we can check the result by hand.
3179
3180    # Red color channel.
3181    # The following are the sum of absolute differences between the pixels.
3182    # sum row dif = (4-1) + (7-2) = 3 + 5 = 8
3183    # sum col dif = (2-1) + (7-4) = 1 + 3 = 4
3184    r = [[1, 2], [4, 7]]
3185
3186    # Blue color channel.
3187    # sum row dif = 18 + 29 = 47
3188    # sum col dif = 7 + 18 = 25
3189    g = [[11, 18], [29, 47]]
3190
3191    # Green color channel.
3192    # sum row dif = 120 + 193 = 313
3193    # sum col dif = 47 + 120 = 167
3194    b = [[73, 120], [193, 313]]
3195
3196    # Combine the 3 color channels into a single 3-dim array.
3197    # The shape is (2, 2, 3) corresponding to (height, width and color).
3198    a = np.dstack((r, g, b))
3199
3200    # Total variation for this image.
3201    # Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
3202    tot_var = 564
3203
3204    # Calculate the total variation using TensorFlow and assert it is correct.
3205    self._test(a, tot_var)
3206
3207    # If we add 1 to all pixel-values then the total variation is unchanged.
3208    self._test(a + 1, tot_var)
3209
3210    # If we negate all pixel-values then the total variation is unchanged.
3211    self._test(-a, tot_var)
3212
3213    # Scale the pixel-values by a float. This scales the total variation as well.
3214    b = 1.1 * a
3215    self._test(b, 1.1 * tot_var)
3216
3217    # Scale by another float.
3218    c = 1.2 * a
3219    self._test(c, 1.2 * tot_var)
3220
3221    # Combine these 3 images into a single array of shape (3, 2, 2, 3)
3222    # where the first dimension is for the image-number.
3223    multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
3224
3225    # Check that TensorFlow correctly calculates the total variation
3226    # for each image individually and returns the correct array.
3227    self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
3228
3229
3230class FormatTest(test_util.TensorFlowTestCase):
3231
3232  def testFormats(self):
3233    prefix = "tensorflow/core/lib"
3234    paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
3235             "gif/testdata/lena.gif")
3236    decoders = {
3237        "jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
3238        "png": functools.partial(image_ops.decode_png, channels=3),
3239        "gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
3240    }
3241    with self.test_session():
3242      for path in paths:
3243        contents = io_ops.read_file(os.path.join(prefix, path)).eval()
3244        images = {}
3245        for name, decode in decoders.items():
3246          image = decode(contents).eval()
3247          self.assertEqual(image.ndim, 3)
3248          for prev_name, prev in images.items():
3249            print("path %s, names %s %s, shapes %s %s" %
3250                  (path, name, prev_name, image.shape, prev.shape))
3251            self.assertAllEqual(image, prev)
3252          images[name] = image
3253
3254  def testError(self):
3255    path = "tensorflow/core/lib/gif/testdata/scan.gif"
3256    with self.test_session():
3257      for decode in image_ops.decode_jpeg, image_ops.decode_png:
3258        with self.assertRaisesOpError(r"Got 12 frames"):
3259          decode(io_ops.read_file(path)).eval()
3260
3261
3262class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
3263
3264  def testSelectFromThreeClusters(self):
3265    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
3266                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
3267    scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
3268    max_output_size_np = 3
3269    iou_threshold_np = 0.5
3270    with self.test_session():
3271      boxes = constant_op.constant(boxes_np)
3272      scores = constant_op.constant(scores_np)
3273      max_output_size = constant_op.constant(max_output_size_np)
3274      iou_threshold = constant_op.constant(iou_threshold_np)
3275      selected_indices = image_ops.non_max_suppression(
3276          boxes, scores, max_output_size, iou_threshold).eval()
3277      self.assertAllClose(selected_indices, [3, 0, 5])
3278
3279  def testInvalidShape(self):
3280    # The boxes should be 2D of shape [num_boxes, 4].
3281    with self.assertRaisesRegexp(ValueError,
3282                                 "Shape must be rank 2 but is rank 1"):
3283      boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
3284      scores = constant_op.constant([0.9])
3285      image_ops.non_max_suppression(boxes, scores, 3, 0.5)
3286
3287    with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"):
3288      boxes = constant_op.constant([[0.0, 0.0, 1.0]])
3289      scores = constant_op.constant([0.9])
3290      image_ops.non_max_suppression(boxes, scores, 3, 0.5)
3291
3292    # The boxes is of shape [num_boxes, 4], and the scores is
3293    # of shape [num_boxes]. So an error will thrown.
3294    with self.assertRaisesRegexp(
3295        ValueError, 'Dimensions must be equal, but are 1 and 2'):
3296      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
3297      scores = constant_op.constant([0.9, 0.75])
3298      selected_indices = image_ops.non_max_suppression(
3299          boxes, scores, 3, 0.5)
3300
3301    # The scores should be 1D of shape [num_boxes].
3302    with self.assertRaisesRegexp(ValueError,
3303                                 "Shape must be rank 1 but is rank 2"):
3304      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
3305      scores = constant_op.constant([[0.9]])
3306      image_ops.non_max_suppression(boxes, scores, 3, 0.5)
3307
3308    # The max_output_size should be a scaler (0-D).
3309    with self.assertRaisesRegexp(ValueError,
3310                                 "Shape must be rank 0 but is rank 1"):
3311      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
3312      scores = constant_op.constant([0.9])
3313      image_ops.non_max_suppression(boxes, scores, [3], 0.5)
3314
3315    # The iou_threshold should be a scaler (0-D).
3316    with self.assertRaisesRegexp(ValueError,
3317                                 "Shape must be rank 0 but is rank 2"):
3318      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
3319      scores = constant_op.constant([0.9])
3320      image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])
3321
3322
3323if __name__ == "__main__":
3324  googletest.main()
3325