"""Functional tests for pooling operations."""
import tensorflow.python.platform

import numpy as np
import tensorflow as tf

from tensorflow.python.kernel_tests import gradient_checker as gc
from tensorflow.python.ops import gen_nn_ops


def GetInceptionMaxPoolShapes():
  """Iterator for some of the max pool ops in the Inception 2015 model.

  Yields:
    Tuple (name, input_size, filter_size, out_size, strides, padding)
  """
  names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
  input_sizes = [[32, 71, 71, 192],
                 [32, 35, 35, 288], [32, 17, 17, 1248], [32, 8, 8, 2048]]
  filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1],
                  [1, 3, 3, 1], [1, 3, 3, 1]]
  output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288],
                  [32, 8, 8, 1248], [32, 8, 8, 2048]]
  strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1],
             [1, 1, 1, 1]]
  paddings = ["VALID", "VALID", "VALID", "SAME"]
  for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
                              strides, paddings):
    yield n, i, f, o, s, p


class PoolingTest(tf.test.TestCase):

  def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
                    expected, use_gpu):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, tf.nn.max_pool or tf.nn.avg_pool.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
    """
    total_size = 1
    for s in input_sizes:
      total_size *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    with self.test_session(use_gpu=use_gpu):
      t = tf.constant(x, shape=input_sizes)
      t = pool_func(t, ksize=ksize, strides=strides, padding=padding)
      actual = t.eval()
      self.assertAllClose(expected, actual.flatten())
      self.assertShapeEqual(actual, t)

  def _testAvgPoolValidPadding(self, use_gpu):
    expected_output = [7.0, 8.0, 9.0]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                       padding="VALID",
                       expected=expected_output, use_gpu=use_gpu)

  def _testAvgPoolSamePadding(self, use_gpu):
    expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 4, 3],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)

  def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
    # input is:
    # [1.0, 2.0
    #  3.0  4.0]
    #
    # Window of [x, x] should do:
    #  [avg(1.0, 2.0), avg(2.0, padded0),
    #   avg(3.0, 4.0), avg(4.0, padded0)]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
                       ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
                       padding="SAME",
                       expected=[1.5, 2.0, 3.5, 4.0], use_gpu=use_gpu)

    # Window of [x,
    #            x] should do:
    #  [avg(1.0, 3.0), avg(2.0, 4.0)
    #   avg(3.0, padded0), avg(4.0, padded0)]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
                       ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
                       padding="SAME",
                       expected=[2.0, 3.0, 3.0, 4.0], use_gpu=use_gpu)

  def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
                       ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
                       padding="SAME",
                       expected=[2.0, 3.0, 3.0, 4.0,
                                 6.0, 7.0, 7.0, 8.0,
                                 10.0, 11.0, 11.0, 12.0,
                                 14.0, 15.0, 15.0, 16.0],
                       use_gpu=use_gpu)
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
                       ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
                       padding="SAME",
                       expected=[3.0, 4.0, 5.0, 6.0,
                                 5.0, 6.0, 7.0, 8.0,
                                 11.0, 12.0, 13.0, 14.0,
                                 13.0, 14.0, 15.0, 16.0],
                       use_gpu=use_gpu)

  def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
                       ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
                       padding="VALID",
                       expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
                       use_gpu=use_gpu)
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
                       padding="VALID",
                       expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
                       use_gpu=use_gpu)

  def _testAvgPoolSamePadding4(self, use_gpu):
    expected_output = [11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0,
                       44.0, 45.0, 46.0, 51.0, 52.0, 53.0, 54.0]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)

  def _testAvgPoolSamePaddingPacket4(self, use_gpu):
    expected_output = [21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0,
                       45.0, 46.0, 47.0, 48.0, 51.0, 52.0, 53.0, 54.0]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
                       ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)

  def _testAvgPoolSamePaddingPacket8(self, use_gpu):
    expected_output = [73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0,
                       90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 105.0, 106.0,
                       107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 117.0, 118.0,
                       119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0, 202.0,
                       203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0,
                       219.0, 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0,
                       235.0, 236.0, 237.0, 238.0, 239.0, 240.0, 245.0, 246.0,
                       247.0, 248.0, 249.0, 250.0, 251.0, 252.0, 329.0, 330.0,
                       331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0, 346.0,
                       347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0,
                       363.0, 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0,
                       375.0, 376.0, 377.0, 378.0, 379.0, 380.0, 425.0, 426.0,
                       427.0, 428.0, 429.0, 430.0, 431.0, 432.0, 441.0, 442.0,
                       443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0, 458.0,
                       459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0,
                       471.0, 472.0, 473.0, 474.0, 475.0, 476.0]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 8, 8, 8],
                       ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)

  def testAvgPooling(self):
    for use_gpu in True, False:
      self._testAvgPoolValidPadding(use_gpu)
      self._testAvgPoolSamePadding(use_gpu)
      self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
      self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
      self._testAvgPoolValidPaddingUnevenStride(use_gpu)
      self._testAvgPoolSamePadding4(use_gpu)
      self._testAvgPoolSamePaddingPacket4(use_gpu)
      self._testAvgPoolSamePaddingPacket8(use_gpu)

  def _testMaxPoolValidPadding(self, use_gpu):
    expected_output = [13.0, 14.0, 15.0]
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 3, 3, 3],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                       padding="VALID",
                       expected=expected_output, use_gpu=use_gpu)

  def _testMaxPoolSamePadding(self, use_gpu):
    expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 3, 3],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)

  def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
    # input is:
    # [1.0, 2.0
    #  3.0  4.0]
    #
    # Window of [x, x] should do:
    #
    #  [max(1.0, 2.0), max(2.0, padded0),
    #   max(3.0, 4.0), max(4.0, padded0)]
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 1],
                       ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
                       padding="SAME",
                       expected=[2.0, 2.0, 4.0, 4.0], use_gpu=use_gpu)

  def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
                       ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
                       padding="VALID",
                       expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
                       use_gpu=use_gpu)
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
                       padding="VALID",
                       expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
                       use_gpu=use_gpu)

  def _testMaxPoolSamePaddingPacket4(self, use_gpu):
    expected_output = [21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0,
                       54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0]
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 4],
                       ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)

  def _testMaxPoolSamePaddingPacket8(self, use_gpu):
    expected_output = [145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0,
                       161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0,
                       177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0,
                       185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0,
                       273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
                       289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0,
                       305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0,
                       313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0,
                       401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0,
                       417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
                       433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0,
                       441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0,
                       465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0,
                       481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0,
                       497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
                       505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0]
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 8, 8, 8],
                       ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)

  def testMaxPooling(self):
    for use_gpu in True, False:
      self._testMaxPoolValidPadding(use_gpu)
      self._testMaxPoolSamePadding(use_gpu)
      self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
      self._testMaxPoolValidPaddingUnevenStride(use_gpu)
      self._testMaxPoolSamePaddingPacket4(use_gpu)
      self._testMaxPoolSamePaddingPacket8(use_gpu)

  # Tests for DepthwiseMaxPooling on CPU only.
  def testDepthwiseMaxPool1x1DepthWindow1(self):
    # input is:
    # [1.0, ..., 10.0] along depth,
    #
    # We maxpool by depth in patches of 2.
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 1, 1, 10],
                       ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
                       padding="SAME",
                       expected=[2.0, 4.0, 6.0, 8.0, 10.0], use_gpu=False)

  def testDepthwiseMaxPool2x2DepthWindow3(self):
    # input is:
    #
    # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
    # output.  Each node has contiguous values, so the depthwise max
    # should be multiples of 3.0.
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 6],
                       ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3],
                       padding="SAME",
                       expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
                       use_gpu=False)

  def _testDepthwiseMaxPoolInvalidConfig(self, in_size, ksize, strides,
                                         error_msg, use_gpu=False):
    # Expects shape-function validation to reject the configuration with
    # a ValueError whose message matches error_msg.
    t = tf.constant(1.0, shape=in_size)
    with self.assertRaisesRegexp(ValueError, error_msg):
      t = tf.nn.max_pool(t, ksize=ksize, strides=strides, padding="SAME")

  def testDepthwiseMaxPoolInvalidConfigs(self):
    self._testDepthwiseMaxPoolInvalidConfig(
        [1, 2, 2, 4], [1, 2, 2, 2],
        [1, 1, 1, 2], "exactly one of pooling across depth")
    self._testDepthwiseMaxPoolInvalidConfig(
        [1, 2, 2, 4], [1, 1, 1, 2],
        [1, 1, 1, 1], "depth window to equal the depth stride")
    self._testDepthwiseMaxPoolInvalidConfig(
        [1, 2, 2, 4], [1, 1, 1, 3],
        [1, 1, 1, 3], "evenly divide")
    if tf.test.IsBuiltWithCuda():
      with self.test_session(use_gpu=True):
        t = tf.constant(1.0, shape=[1, 2, 2, 4])
        with self.assertRaisesOpError("for CPU devices"):
          tf.nn.max_pool(t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
                         padding="SAME").eval()

  # The following are tests that verify that the CPU and GPU implementations
  # produce the same results.
  def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
    tensor_input = np.random.rand(*input_shape).astype(np.float32)
    with self.test_session(use_gpu=True):
      t = tf.constant(tensor_input, shape=input_shape)
      out_op, _ = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
      gpu_val = out_op.eval()
    with self.test_session(use_gpu=False):
      t = tf.constant(tensor_input, shape=input_shape)
      out_op = tf.nn.max_pool(t, ksize, strides, padding)
      cpu_val = out_op.eval()
    self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)

  def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
                           padding):
    # Generate numbers in a narrow range, so that there are many duplicates
    # in the input.
    tensor_input = np.random.random_integers(0, 3,
                                             input_shape).astype(np.float32)
    tensor_output = np.random.rand(*output_shape).astype(np.float32)
    with self.test_session(use_gpu=True):
      t = tf.constant(tensor_input, shape=input_shape)
      _, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
      argmax = argmax_op.eval()
      grad_in = tf.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
                                                     ksize, strides, padding)
      gpu_val = out_op.eval()
      self.assertShapeEqual(gpu_val, out_op)
    with self.test_session(use_gpu=False):
      t = tf.constant(tensor_input, shape=input_shape)
      out_op = tf.nn.max_pool(t, ksize, strides, padding)
      orig_out = out_op.eval()
      grad_in = tf.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize,
                                         strides, padding)
      cpu_val = out_op.eval()
      self.assertShapeEqual(cpu_val, out_op)
    self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)

  def testMaxPoolingWithArgmax(self):
    # MaxPoolWithArgMax is implemented only on GPU.
    if not tf.test.IsBuiltWithCuda():
      return
    tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
    with self.test_session(use_gpu=True) as sess:
      t = tf.constant(tensor_input, shape=[1, 3, 3, 1])
      out_op, argmax_op = tf.nn.max_pool_with_argmax(t,
                                                     ksize=[1, 2, 2, 1],
                                                     strides=[1, 1, 1, 1],
                                                     Targmax=tf.int64,
                                                     padding="VALID")
      out, argmax = sess.run([out_op, argmax_op])
      self.assertShapeEqual(out, out_op)
      self.assertShapeEqual(argmax, argmax_op)
      self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0])
      self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5])

  def testMaxPoolingGradWithArgmax(self):
    # MaxPoolWithArgMax is implemented only on GPU.
    if not tf.test.IsBuiltWithCuda():
      return
    orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
    tensor_input = [11.0, 12.0, 13.0, 14.0]
    tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
    with self.test_session(use_gpu=True):
      orig_in = tf.constant(orig_input, shape=[1, 3, 3, 1])
      t = tf.constant(tensor_input, shape=[1, 2, 2, 1])
      argmax = tf.constant(tensor_argmax, shape=[1, 2, 2, 1],
                           dtype=tf.int64)
      out_op = gen_nn_ops._max_pool_grad_with_argmax(orig_in, t, argmax,
                                                     ksize=[1, 2, 2, 1],
                                                     strides=[1, 1, 1, 1],
                                                     padding="VALID")
      out = out_op.eval().flatten()
      self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0,
                                14.0, 0.0, 0.0, 0.0])

  def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes,
                                window_rows, window_cols, row_stride,
                                col_stride, padding, use_gpu,
                                x_init_value=None):
    """Verifies the gradients of the pooling function.

    Args:
      pool_func: Function to be called, tf.nn.max_pool or tf.nn.avg_pool.
      input_sizes: Input tensor dimensions.
      output_sizes: Output tensor dimensions.
      window_rows: kernel size in row dim.
      window_cols: kernel size in col dim.
      row_stride: Row Stride.
      col_stride: Col Stride.
      padding: Padding type.
      use_gpu: whether we are running on GPU.
      x_init_value: Values to be passed to the gradient checker.
    """
    total_size = 1
    for s in input_sizes:
      total_size *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    with self.test_session(use_gpu=use_gpu):
      input_tensor = tf.constant(x, shape=input_sizes, name="input")
      if pool_func == tf.nn.avg_pool:
        func_name = "avg_pool"
        err_margin = 1e-4
      else:
        # Max pool gradients are only defined at the argmax locations, so
        # start from distinct values to avoid ties.
        if x_init_value is None:
          x_init_value = np.asfarray(
              np.arange(1, total_size + 1),
              dtype=np.float32).reshape(input_sizes)
        func_name = "max_pool"
        err_margin = 1e-3
      # BUG FIX: the kernel's column dimension previously used window_rows,
      # silently ignoring window_cols (all callers pass square windows, so
      # existing behavior is unchanged).
      t = pool_func(input_tensor, ksize=[1, window_rows, window_cols, 1],
                    strides=[1, row_stride, col_stride, 1],
                    padding=padding, name=func_name)
      err = gc.ComputeGradientError(
          input_tensor, input_sizes, t, output_sizes,
          x_init_value=x_init_value, delta=1e-2)
    # Single-argument % formatting prints identically under py2 and py3.
    print("%s gradient error = %g" % (func_name, err))
    self.assertLess(err, err_margin)

  def _testMaxPoolGradValidPadding1_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[1, 3, 3, 1],
        output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1,
        col_stride=1, padding="VALID", use_gpu=use_gpu)

  def _testMaxPoolGradValidPadding2_1_6(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[2, 6, 6, 3],
        output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1,
        col_stride=1, padding="VALID", use_gpu=use_gpu)

  def _testMaxPoolGradValidPadding2_1_7(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[2, 7, 7, 3],
        output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1,
        col_stride=1, padding="VALID", use_gpu=use_gpu)

  def _testMaxPoolGradValidPadding2_2(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[2, 2, 2, 3],
        output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
        col_stride=2, padding="VALID", use_gpu=use_gpu)

  def _testMaxPoolGradSamePadding1_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
        col_stride=1, padding="SAME", use_gpu=use_gpu)

  def _testMaxPoolGradSamePadding2_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
        col_stride=1, padding="SAME", use_gpu=use_gpu)

  def _testMaxPoolGradSamePadding2_2(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
        col_stride=2, padding="SAME", use_gpu=use_gpu)

  def _testMaxPoolGradSamePadding3_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.max_pool, input_sizes=[1, 7, 7, 1],
        output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
        col_stride=1, padding="SAME", use_gpu=use_gpu)

  def testMaxPoolGrad(self):
    for use_gpu in True, False:
      self._testMaxPoolGradValidPadding1_1(use_gpu=use_gpu)
      self._testMaxPoolGradValidPadding2_1_6(use_gpu=use_gpu)
      self._testMaxPoolGradValidPadding2_1_7(use_gpu=use_gpu)
      self._testMaxPoolGradValidPadding2_2(use_gpu=use_gpu)
      self._testMaxPoolGradSamePadding1_1(use_gpu=use_gpu)
      self._testMaxPoolGradSamePadding2_1(use_gpu=use_gpu)
      self._testMaxPoolGradSamePadding2_2(use_gpu=use_gpu)
      self._testMaxPoolGradSamePadding3_1(use_gpu=use_gpu)

  def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
                   window_cols, row_stride, col_stride, padding):
    """Max Pooling Gradient.

    Args:
      orig_input: A float Tensor. The original input tensor.
      orig_output: A float Tensor. The original output tensor.
      grad: A float Tensor.
        The 4D (batch x rows x cols x depth) output backprop.
      window_rows: integer. Kernel size along rows dimension.
      window_cols: integer. Kernel size along cols dimension.
      row_stride: integer. Stride along rows dimension.
      col_stride: integer. Stride along cols dimension.
      padding: PoolingOpDef.Padding.  Padding type.

    Returns:
      A Tensor.
    """
    return gen_nn_ops._max_pool_grad(
        orig_input, orig_output, grad,
        [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1],
        padding)

  def _testMaxPoolGradDirect(self, input_data, output_backprop,
                             expected_input_backprop, input_sizes, output_sizes,
                             window_rows, window_cols, row_stride, col_stride,
                             padding, use_gpu):
    # Builds the forward pool and feeds a known backprop through
    # MaxPoolGrad, comparing against a hand-computed expected gradient.
    with self.test_session(use_gpu=use_gpu):
      input_tensor = tf.constant(input_data, shape=input_sizes)
      output_tensor = tf.nn.max_pool(
          input_tensor, [1, window_rows, window_cols, 1],
          [1, row_stride, col_stride, 1], padding)
      output_backprop_tensor = tf.constant(output_backprop,
                                           shape=output_sizes)

      input_backprop_tensor = self._MaxPoolGrad(
          input_tensor, output_tensor, output_backprop_tensor,
          window_rows, window_cols, row_stride, col_stride, padding)

      actual_input_backprop = input_backprop_tensor.eval()
      self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
      actual_input_backprop = actual_input_backprop.flatten()
      actual_input_backprop = self._GetNdArray(actual_input_backprop)

      # Evaluated (but not compared) to make sure the forward op runs.
      actual_output = output_tensor.eval().flatten()
      actual_output = self._GetNdArray(actual_output)

      self.assertAllClose(expected_input_backprop, actual_input_backprop,
                          rtol=1e-6, atol=1e-6)

  def _testMaxPoolGradDirect1_1(self):
    input_data = [
        1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0]
    output_backprop = [
        11.0, 12.0, 13.0,
        15.0, 16.0, 17.0,
        19.0, 20.0, 21.0]
    expected_input_backprop = [
        11.0, 12.0, 13.0, 0.0,
        15.0, 16.0, 17.0, 0.0,
        19.0, 20.0, 21.0, 0.0,
        0.0, 0.0, 0.0, 0.0]

    for use_gpu in True, False:
      self._testMaxPoolGradDirect(
          input_data, output_backprop, expected_input_backprop,
          input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
          window_rows=2, window_cols=2, row_stride=1, col_stride=1,
          padding="VALID", use_gpu=use_gpu)

  def _testMaxPoolGradDirect1_2(self):
    input_data = [
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0,
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0]
    output_backprop = [
        11.0, 12.0, 13.0,
        15.0, 16.0, 17.0,
        19.0, 20.0, 21.0]
    expected_input_backprop = [
        11.0, 0.0, 25.0, 0.0,
        0.0, 31.0, 0.0, 17.0,
        19.0, 0.0, 41.0, 0.0,
        0.0, 0.0, 0.0, 0.0]

    for use_gpu in True, False:
      self._testMaxPoolGradDirect(
          input_data, output_backprop, expected_input_backprop,
          input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
          window_rows=2, window_cols=2, row_stride=1, col_stride=1,
          padding="VALID", use_gpu=use_gpu)

  def _testMaxPoolGradDirect1_3(self):
    input_data = [
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0,
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0,]
    output_backprop = [
        11.0, 12.0, 13.0, 14.0,
        15.0, 16.0, 17.0, 18.0,
        19.0, 20.0, 21.0, 22.0,
        23.0, 24.0, 25.0, 26.0]
    expected_input_backprop = [
        54, 0.0, 62, 0.0,
        0.0, 60, 0.0, 22.0,
        47, 0.0, 51, 0.0,
        0.0, 0.0, 0.0, 0.0,]

    for use_gpu in True, False:
      self._testMaxPoolGradDirect(
          input_data, output_backprop, expected_input_backprop,
          input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1],
          window_rows=3, window_cols=3, row_stride=1, col_stride=1,
          padding="SAME", use_gpu=use_gpu)

  def _testMaxPoolGradDirectWithNans2_1(self):
    input_data = [float("nan")] * 16
    output_backprop = [
        11.0, 12.0, 13.0,
        15.0, 16.0, 17.0,
        19.0, 20.0, 21.0]
    # Test the CPU implementation, which propagates diffs in case of NaN
    expected_input_backprop_tf_cpu = [
        11.0, 12.0, 13.0, 0.0,
        15.0, 16.0, 17.0, 0.0,
        19.0, 20.0, 21.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_tf_cpu,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=False)

    if not tf.test.IsBuiltWithCuda():
      return

    # Test the GPU implementation that uses cudnn for now.
    # It does not propagate the diff in cases of NaNs
    expected_input_backprop_cudnn = [
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_cudnn,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=True)

  def _testMaxPoolGradDirectWithNans2_2(self):
    input_data = [float("nan")] * 16
    output_backprop = [
        float("nan"), 12.0, 13.0,
        15.0, float("nan"), 17.0,
        19.0, 20.0, float("nan")]
    # Test the CPU implementation, which propagates diffs in case of NaN
    expected_input_backprop_tf_cpu = [
        float("nan"), 12.0, 13.0, 0.0,
        15.0, float("nan"), 17.0, 0.0,
        19.0, 20.0, float("nan"), 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_tf_cpu,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=False)

    if not tf.test.IsBuiltWithCuda():
      return

    # Test the GPU implementation that uses cudnn for now.
    # It does not propagate the diff in cases of NaNs
    expected_input_backprop_cudnn = [
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_cudnn,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=True)

  def testMaxPoolGradDirect(self):
    self._testMaxPoolGradDirect1_1()
    self._testMaxPoolGradDirect1_2()
    self._testMaxPoolGradDirect1_3()
    self._testMaxPoolGradDirectWithNans2_1()
    self._testMaxPoolGradDirectWithNans2_2()

  def testAvgPoolGrad(self):
    for use_gpu in False, True:
      self._testAvgPoolGradValidPadding1_1(use_gpu)
      self._testAvgPoolGradValidPadding2_1(use_gpu)
      self._testAvgPoolGradValidPadding2_2(use_gpu)
      self._testAvgPoolGradSamePadding1_1(use_gpu)
      self._testAvgPoolGradSamePadding2_1(use_gpu)
      self._testAvgPoolGradSamePadding2_2(use_gpu)
      self._testAvgPoolGradSamePadding3_1(use_gpu)

  def _testAvgPoolGradValidPadding1_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
        output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1,
        col_stride=1, padding="VALID", use_gpu=use_gpu)

  def _testAvgPoolGradValidPadding2_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
        output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1,
        col_stride=1, padding="VALID", use_gpu=use_gpu)

  def _testAvgPoolGradValidPadding2_2(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.avg_pool, input_sizes=[2, 2, 2, 3],
        output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
        col_stride=2, padding="VALID", use_gpu=use_gpu)

  def _testAvgPoolGradSamePadding1_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
        col_stride=1, padding="SAME", use_gpu=use_gpu)

  def _testAvgPoolGradSamePadding2_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
        col_stride=1, padding="SAME", use_gpu=use_gpu)

  def _testAvgPoolGradSamePadding2_2(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
        output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
        col_stride=2, padding="SAME", use_gpu=use_gpu)

  def _testAvgPoolGradSamePadding3_1(self, use_gpu):
    self._ConstructAndTestGradient(
        tf.nn.avg_pool, input_sizes=[1, 7, 7, 1],
        output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
        col_stride=1, padding="SAME", use_gpu=use_gpu)

  def testShapeFunctionEdgeCases(self):
    # All shapes unknown.
    for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]:
      # BUG FIX: previously called tf.nn.max_pool directly, so avg_pool was
      # never exercised here despite the loop.
      p = pool_func(tf.placeholder(tf.float32),
                    ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
                    padding="SAME")
      self.assertEqual([None, None, None, None], p.get_shape().as_list())
    p, am = tf.nn.max_pool_with_argmax(
        tf.placeholder(tf.float32),
        ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
        padding="SAME")
    self.assertEqual([None, None, None, None], p.get_shape().as_list())
    self.assertEqual([None, None, None, None], am.get_shape().as_list())

    # Incorrect input shape.
    for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
                      tf.nn.max_pool_with_argmax]:
      with self.assertRaises(ValueError):
        pool_func(tf.placeholder(tf.float32, shape=[1, 3]),
                  ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME")

    # Illegal strides.
    for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
                      tf.nn.max_pool_with_argmax]:
      with self.assertRaisesRegexp(ValueError, "strides in the batch"):
        pool_func(tf.placeholder(tf.float32),
                  ksize=[1, 1, 1, 1], strides=[2, 1, 1, 1], padding="SAME")
    with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
      tf.nn.avg_pool(tf.placeholder(tf.float32),
                     ksize=[1, 1, 1, 1], strides=[1, 1, 1, 2], padding="SAME")

    # Filter larger than input.
    for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
                      tf.nn.max_pool_with_argmax]:
      with self.assertRaisesRegexp(ValueError,
                                   "filter must not be larger than the input"):
        pool_func(tf.placeholder(tf.float32,
                                 shape=[32, 20, 20, 3]),
                  ksize=[1, 20, 21, 1], strides=[1, 1, 1, 1], padding="SAME")
      with self.assertRaisesRegexp(ValueError,
                                   "filter must not be larger than the input"):
        pool_func(tf.placeholder(tf.float32,
                                 shape=[32, 20, 20, 3]),
                  ksize=[1, 21, 20, 1], strides=[1, 1, 1, 1], padding="SAME")

    # Stride larger than filter.
    for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
                      tf.nn.max_pool_with_argmax]:
      with self.assertRaisesRegexp(
          ValueError, "stride must be less than or equal to filter"):
        pool_func(tf.placeholder(tf.float32,
                                 shape=[32, 20, 20, 3]),
                  ksize=[1, 5, 3, 1], strides=[1, 5, 5, 1], padding="SAME")
      with self.assertRaisesRegexp(
          ValueError, "stride must be less than or equal to filter"):
        pool_func(tf.placeholder(tf.float32,
                                 shape=[32, 20, 20, 3]),
                  ksize=[1, 3, 5, 1], strides=[1, 5, 5, 1], padding="SAME")


def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
  """Returns a forward CPU-vs-GPU comparison test for one pool shape."""
  def Test(self):
    # MaxPoolWithArgMax is implemented only on GPU.
    if not tf.test.IsBuiltWithCuda():
      return
    self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
  return Test


def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
  """Returns a backward CPU-vs-GPU comparison test for one pool shape."""
  def Test(self):
    # MaxPoolWithArgMax is implemented only on GPU.
    if not tf.test.IsBuiltWithCuda():
      return
    self._CompareMaxPoolingBk(input_size, output_size,
                              filter_size, strides, padding)
  return Test


if __name__ == "__main__":
  # Attach one forward and one gradient test per Inception pooling shape.
  for (name_, input_size_, filter_size_, output_size_, stride_,
       padding_) in GetInceptionMaxPoolShapes():
    setattr(PoolingTest, "testMaxPoolFwd_" + name_,
            GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
    setattr(PoolingTest, "testMaxPoolGrad_" + name_,
            GetMaxPoolGradTest(input_size_, filter_size_, output_size_,
                               stride_, padding_))
  tf.test.main()