# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import head as ts_head_lib
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import state_management

from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training as train


class HeadTest(test.TestCase):
  """Basic argument validation performed by the time series head."""

  def test_labels_provided_error(self):
    # The time series head reads targets out of `features`; a separate
    # `labels` argument must be rejected in every mode.
    estimator_spec_fn = _stub_model_fn()
    for mode in (estimator_lib.ModeKeys.TRAIN,
                 estimator_lib.ModeKeys.EVAL,
                 estimator_lib.ModeKeys.PREDICT):
      with self.assertRaisesRegexp(ValueError, "labels"):
        estimator_spec_fn(features={}, labels={"a": "b"}, mode=mode)

  def test_unknown_mode(self):
    # Any mode outside TRAIN/EVAL/PREDICT is an error.
    estimator_spec_fn = _stub_model_fn()
    with self.assertRaisesRegexp(ValueError, "Unknown mode 'Not a mode'"):
      estimator_spec_fn(features={}, labels={}, mode="Not a mode")


class _TickerModel(object):
  """Stub model whose loss, state, and predictions echo a "ticker" feature."""
  num_features = 1
  dtype = dtypes.float32

  def initialize_graph(self, input_statistics):
    pass

  def define_loss(self, features, mode):
    del mode  # unused
    ticker = features["ticker"]
    return model.ModelOutputs(
        loss=ticker,
        end_state=(ticker, ticker),
        prediction_times=array_ops.zeros(()),
        predictions={"ticker": ticker})


class EvaluationMetricsTests(test.TestCase):

  def test_metrics_consistent(self):
    # Tests that the identity metrics used to report in-sample predictions
    # match the behavior of standard metrics.
    graph = ops.Graph()
    with graph.as_default():
      # A local counter variable drives the "ticker" feature so that each
      # metric update sees a fresh value.
      counter = variables.Variable(
          name="ticker",
          initial_value=0,
          dtype=dtypes.int64,
          collections=[ops.GraphKeys.LOCAL_VARIABLES])
      ticker = array_ops.reshape(
          math_ops.cast(counter.count_up_to(10), dtype=dtypes.float32),
          (1, 1, 1))
      features = {
          feature_keys.TrainEvalFeatures.TIMES: array_ops.zeros((1, 1)),
          feature_keys.TrainEvalFeatures.VALUES: array_ops.zeros((1, 1, 1)),
          "ticker": ticker
      }
      model_fn = ts_head_lib.time_series_regression_head(
          model=_TickerModel(),
          state_manager=state_management.PassthroughStateManager(),
          optimizer=train.GradientDescentOptimizer(0.001)).create_estimator_spec
      outputs = model_fn(
          features=features, labels=None, mode=estimator_lib.ModeKeys.EVAL)
      # Each eval metric is a (value, update_op) pair; collect the update ops.
      metric_update_ops = [
          metric[1] for metric in outputs.eval_metric_ops.values()]
      loss_mean, loss_update = metrics.mean(outputs.loss)
      metric_update_ops.append(loss_update)
      with self.test_session() as session:
        coordinator = coordinator_lib.Coordinator()
        queue_runner_impl.start_queue_runners(session, coord=coordinator)
        variables.local_variables_initializer().run()
        session.run(metric_update_ops)
        loss_evaled, metric_evaled, nested_metric_evaled = session.run(
            (loss_mean, outputs.eval_metric_ops["ticker"][0],
             outputs.eval_metric_ops[feature_keys.FilteringResults.STATE_TUPLE][
                 0][0]))
        # The custom model_utils metrics for in-sample predictions should be
        # in sync with the Estimator's mean metric for model loss.
        self.assertAllClose(0., loss_evaled)
        self.assertAllClose((((0.,),),), metric_evaled)
        self.assertAllClose((((0.,),),), nested_metric_evaled)
        coordinator.request_stop()
        coordinator.join()


class _StubModel(object):
  """Minimal model stub carrying only the attributes the head inspects."""
  num_features = 3
  dtype = dtypes.float64

  def initialize_graph(self, input_statistics):
    del input_statistics  # unused


def _stub_model_fn():
  """Returns a `create_estimator_spec` function bound to a stub model."""
  head = ts_head_lib.time_series_regression_head(
      model=_StubModel(),
      state_manager=state_management.PassthroughStateManager(),
      optimizer=train.AdamOptimizer(0.001))
  return head.create_estimator_spec


class TrainEvalFeatureCheckingTests(test.TestCase):
  """Shape/presence validation of train and eval feature dictionaries."""

  def test_no_time_feature(self):
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected a '{}' feature".format(
        feature_keys.TrainEvalFeatures.TIMES)
    for mode in (estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL):
      with self.assertRaisesRegexp(ValueError, expected_message):
        estimator_spec_fn(
            features={feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]},
            labels=None,
            mode=mode)

  def test_no_value_feature(self):
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected a '{}' feature".format(
        feature_keys.TrainEvalFeatures.VALUES)
    for mode in (estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL):
      with self.assertRaisesRegexp(ValueError, expected_message):
        estimator_spec_fn(
            features={feature_keys.TrainEvalFeatures.TIMES: [[1]]},
            labels=None,
            mode=mode)

  def test_bad_time_rank(self):
    # Times are expected to be rank 2 ([batch, window]); rank 3 should fail.
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected shape.*for feature '{}'".format(
        feature_keys.TrainEvalFeatures.TIMES)
    for mode in (estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL):
      with self.assertRaisesRegexp(ValueError, expected_message):
        estimator_spec_fn(
            features={
                feature_keys.TrainEvalFeatures.TIMES: [[[1]]],
                feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
            },
            labels=None,
            mode=mode)

  def test_bad_value_rank(self):
    # Values are expected to be rank 3 ([batch, window, features]); rank 2
    # should fail.
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected shape.*for feature '{}'".format(
        feature_keys.TrainEvalFeatures.VALUES)
    for mode in (estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL):
      with self.assertRaisesRegexp(ValueError, expected_message):
        estimator_spec_fn(
            features={
                feature_keys.TrainEvalFeatures.TIMES: [[1]],
                feature_keys.TrainEvalFeatures.VALUES: [[1.]]
            },
            labels=None,
            mode=mode)

  def test_bad_value_num_features(self):
    # _StubModel declares num_features = 3; a single-feature values tensor
    # should be rejected.
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected shape.*, 3.*for feature '{}'".format(
        feature_keys.TrainEvalFeatures.VALUES)
    for mode in (estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL):
      with self.assertRaisesRegexp(ValueError, expected_message):
        estimator_spec_fn(
            features={
                feature_keys.TrainEvalFeatures.TIMES: [[1]],
                feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
            },
            labels=None,
            mode=mode)

  def test_bad_exogenous_shape(self):
    # Exogenous features must share the [batch, window] prefix with times.
    estimator_spec_fn = _stub_model_fn()
    for mode in (estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL):
      with self.assertRaisesRegexp(
          ValueError,
          "Features must have shape.*for feature 'exogenous'"):
        estimator_spec_fn(
            features={
                feature_keys.TrainEvalFeatures.TIMES: [[1]],
                feature_keys.TrainEvalFeatures.VALUES: [[[1., 2., 3.]]],
                "exogenous": [[1], [2]]
            },
            labels=None,
            mode=mode)


class PredictFeatureCheckingTests(test.TestCase):
  """Shape/presence validation of prediction feature dictionaries."""

  def test_no_time_feature(self):
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected a '{}' feature".format(
        feature_keys.PredictionFeatures.TIMES)
    with self.assertRaisesRegexp(ValueError, expected_message):
      estimator_spec_fn(
          features={
              feature_keys.PredictionFeatures.STATE_TUPLE: ([[[1.]]], 1.)
          },
          labels=None,
          mode=estimator_lib.ModeKeys.PREDICT)

  def test_no_start_state_feature(self):
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected a '{}' feature".format(
        feature_keys.PredictionFeatures.STATE_TUPLE)
    with self.assertRaisesRegexp(ValueError, expected_message):
      estimator_spec_fn(
          features={feature_keys.PredictionFeatures.TIMES: [[1]]},
          labels=None,
          mode=estimator_lib.ModeKeys.PREDICT)

  def test_bad_time_rank(self):
    # A scalar times feature (rank 0) should be rejected.
    estimator_spec_fn = _stub_model_fn()
    expected_message = "Expected shape.*for feature '{}'".format(
        feature_keys.PredictionFeatures.TIMES)
    with self.assertRaisesRegexp(ValueError, expected_message):
      estimator_spec_fn(
          features={
              feature_keys.PredictionFeatures.TIMES: 1,
              feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.))
          },
          labels=None,
          mode=estimator_lib.ModeKeys.PREDICT)

  def test_bad_exogenous_shape(self):
    # A scalar exogenous feature cannot match the [batch, window] prefix.
    estimator_spec_fn = _stub_model_fn()
    with self.assertRaisesRegexp(
        ValueError,
        "Features must have shape.*for feature 'exogenous'"):
      estimator_spec_fn(
          features={
              feature_keys.PredictionFeatures.TIMES: [[1]],
              feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.)),
              "exogenous": 1.
          },
          labels=None,
          mode=estimator_lib.ModeKeys.PREDICT)


if __name__ == "__main__":
  test.main()