Searched refs:labels (Results 1 - 25 of 665) sorted by relevance

/external/annotation-tools/asmx/src/org/objectweb/asm/tree/
TableSwitchInsnNode.java
66 public List labels; field in class:TableSwitchInsnNode
74 * @param labels beginnings of the handler blocks. <tt>labels[i]</tt> is
81 final Label[] labels)
87 this.labels = new ArrayList();
88 if (labels != null) {
89 this.labels.addAll(Arrays.asList(labels));
94 Label[] labels = new Label[this.labels
77 TableSwitchInsnNode( final int min, final int max, final Label dflt, final Label[] labels) argument
[all...]
LookupSwitchInsnNode.java
61 public List labels; field in class:LookupSwitchInsnNode
68 * @param labels beginnings of the handler blocks. <tt>labels[i]</tt> is
74 final Label[] labels)
79 this.labels = new ArrayList(labels == null ? 0 : labels.length);
85 if (labels != null) {
86 this.labels.addAll(Arrays.asList(labels));
71 LookupSwitchInsnNode( final Label dflt, final int[] keys, final Label[] labels) argument
[all...]
/external/tensorflow/tensorflow/contrib/nn/python/ops/
cross_entropy.py
29 labels,
32 """Computes softmax cross entropy between `logits` and `labels`.
43 need not be. All that is required is that each row of `labels` is
47 If using exclusive `labels` (wherein one and only
54 `logits` and `labels` must have the same shape `[batch_size, num_classes]`
59 labels: Each row `labels[i]` must be a valid probability distribution.
68 labels=labels, logits=logits, dim=dim, name=name)
76 labels,
[all...]
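
The cross_entropy.py matches above describe softmax cross entropy where each row of `labels` is a probability distribution with the same shape as `logits`. A minimal NumPy sketch of that computation (function and variable names here are illustrative, not taken from the file):

import numpy as np

def softmax_cross_entropy(labels, logits):
    """Per-example cross entropy between probability-distribution labels and logits."""
    # Numerically stable softmax: subtract the row-wise max before exponentiating.
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # Cross entropy per row: -sum_j labels[i, j] * log_softmax[i, j].
    return -(labels * log_softmax).sum(axis=1)

labels = np.array([[0.0, 1.0, 0.0], [0.3, 0.3, 0.4]])
logits = np.array([[1.0, 2.0, 0.5], [0.0, 0.0, 0.0]])
print(softmax_cross_entropy(labels, logits))  # one loss value per row
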
/external/tensorflow/tensorflow/contrib/boosted_trees/python/utils/
losses.py
28 def per_example_logistic_loss(labels, weights, predictions):
29 """Logistic loss given labels, example weights and predictions.
32 labels: Rank 2 (N, 1) tensor of per-example labels.
40 labels = math_ops.to_float(labels)
42 labels=labels, logits=predictions)
49 def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
56 labels
[all...]
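
The losses.py matches describe a per-example logistic loss computed from labels, weights and logit predictions via sigmoid cross entropy. A NumPy sketch using the numerically stable form max(x, 0) - x*z + log(1 + exp(-|x|)); the helper name is hypothetical:

import numpy as np

def per_example_logistic_loss(labels, weights, predictions):
    """Stable sigmoid cross entropy: max(x, 0) - x*z + log(1 + exp(-|x|))."""
    x = np.asarray(predictions, dtype=float)  # logits, shape (N, 1)
    z = np.asarray(labels, dtype=float)       # labels in {0, 1}, shape (N, 1)
    loss = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
    return loss * np.asarray(weights, dtype=float)

print(per_example_logistic_loss([[1.0], [0.0]], [[1.0], [2.0]], [[2.0], [-1.0]]))
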
/external/tensorflow/tensorflow/python/ops/
confusion_matrix.py
38 labels, predictions, expected_rank_diff=0, name=None):
45 But, for example, if `labels` contains class IDs and `predictions` contains 1
47 `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
48 `labels` if `rank(predictions) - rank(labels) == 0`, and
49 `predictions` if `rank(predictions) - rank(labels) == 2`.
55 labels: Label values, a `Tensor` whose dimensions match `predictions`.
57 expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
61 Tuple of `labels` and `predictions`, possibly with last dim squeezed.
64 [labels, prediction
[all...]
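
confusion_matrix.py's docstring describes squeezing a trailing size-1 dimension of `labels` or `predictions` whenever their rank difference is one away from `expected_rank_diff`. A hedged NumPy sketch of that rule, not the TensorFlow implementation:

import numpy as np

def remove_squeezable_dimensions(labels, predictions, expected_rank_diff=0):
    """Squeeze a trailing size-1 dim so rank(predictions) - rank(labels) == expected_rank_diff."""
    rank_diff = predictions.ndim - labels.ndim
    if rank_diff == expected_rank_diff + 1 and predictions.shape[-1] == 1:
        predictions = np.squeeze(predictions, axis=-1)
    elif rank_diff == expected_rank_diff - 1 and labels.shape[-1] == 1:
        labels = np.squeeze(labels, axis=-1)
    return labels, predictions

labels = np.array([0, 2, 1])   # class IDs, rank 1
predictions = np.ones((3, 1))  # rank 2 with a trailing size-1 dim
print(remove_squeezable_dimensions(labels, predictions)[1].shape)  # (3,)
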
metrics_impl.py
53 def _remove_squeezable_dimensions(predictions, labels, weights):
56 Squeezes last dim of `predictions` or `labels` if their rank differs by 1
68 labels: Optional label `Tensor` whose dimensions match `predictions`.
73 Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
77 if labels is not None:
78 labels, predictions = confusion_matrix.remove_squeezable_dimensions(
79 labels, predictions)
80 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
83 return predictions, labels, None
89 return predictions, labels, weight
[all...]
/external/tensorflow/tensorflow/contrib/libsvm/python/kernel_tests/
decode_libsvm_op_test.py
37 sparse_features, labels = libsvm_ops.decode_libsvm(
42 self.assertAllEqual(labels.get_shape().as_list(), [3])
44 features, labels = sess.run([features, labels])
45 self.assertAllEqual(labels, [1, 1, 2])
55 sparse_features, labels = libsvm_ops.decode_libsvm(
60 self.assertAllEqual(labels.get_shape().as_list(), [3, 2])
62 features, labels = sess.run([features, labels])
63 self.assertAllEqual(labels, [[
[all...]
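
The decode_libsvm test expects labels and sparse features to come back from LibSVM-formatted strings. A plain-Python sketch of the same parsing, assuming the conventional 'label index:value ...' line format (not the kernel's code):

def decode_libsvm_line(line):
    """Parse 'label idx:val idx:val ...' into (label, {index: value})."""
    parts = line.split()
    label = int(parts[0])
    features = {}
    for item in parts[1:]:
        index, value = item.split(":")
        features[int(index)] = float(value)
    return label, features

print(decode_libsvm_line("1 1:3.4 2:0.5 4:0.231"))
# (1, {1: 3.4, 2: 0.5, 4: 0.231})
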
/external/toolchain-utils/crosperf/
results_organizer_unittest.py
8 We create some labels, benchmark_runs and then create a ResultsOrganizer,
137 labels = [mock_instance.label1, mock_instance.label2]
140 benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
142 benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
144 benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
146 benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
148 benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
150 benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
152 benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
154 benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[
[all...]
/external/tensorflow/tensorflow/contrib/kernel_methods/python/
losses_test.py
37 labels = constant_op.constant([0, 1])
39 _ = losses.sparse_multiclass_hinge_loss(labels, logits)
42 """An error is raised when labels have invalid shape."""
45 labels = constant_op.constant([1, 0], shape=(1, 1, 2))
47 _ = losses.sparse_multiclass_hinge_loss(labels, logits)
53 labels = constant_op.constant([1, 0], shape=(2,))
56 _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
59 """An error is raised when labels have invalid shape."""
62 labels = constant_op.constant([1, 0], dtype=dtypes.float32)
64 _ = losses.sparse_multiclass_hinge_loss(labels, logit
[all...]
losses.py
31 labels,
52 labels: `Tensor` of shape [batch_size] or [batch_size, 1]. Corresponds to
64 shape as `labels`; otherwise, it is a scalar.
67 ValueError: If `logits`, `labels` or `weights` have invalid or inconsistent
69 ValueError: If `labels` tensor has invalid dtype.
73 labels)) as scope:
85 # Check labels have valid type.
86 if labels.dtype != dtypes.int32 and labels.dtype != dtypes.int64:
88 'Invalid dtype for labels
[all...]
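
The kernel_methods losses.py matches take integer class labels of shape [batch_size] or [batch_size, 1] plus per-class logits. A NumPy sketch of a Crammer-Singer style multiclass hinge loss, shown only as an illustration of what such a function computes:

import numpy as np

def sparse_multiclass_hinge_loss(labels, logits):
    """max(0, 1 + max_{c != y} logits[i, c] - logits[i, y]) per example."""
    labels = np.asarray(labels).reshape(-1)  # accept [N] or [N, 1]
    n = labels.shape[0]
    correct = logits[np.arange(n), labels]
    wrong = logits.copy()
    wrong[np.arange(n), labels] = -np.inf    # exclude the true class
    margin = 1.0 + wrong.max(axis=1) - correct
    return np.maximum(0.0, margin)

logits = np.array([[2.0, 0.5, -1.0], [0.1, 0.2, 0.3]])
print(sparse_multiclass_hinge_loss([0, 2], logits))  # [0.  0.9]
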
/external/tensorflow/tensorflow/contrib/sparsemax/python/ops/
sparsemax_loss.py
28 def sparsemax_loss(logits, sparsemax, labels, name=None):
37 labels: A `Tensor`. Must have the same type as `logits`.
45 [logits, sparsemax, labels]) as name:
48 labels = ops.convert_to_tensor(labels, name="labels")
58 q_part = labels * (0.5 * labels - shifted_logits)
/external/tensorflow/tensorflow/contrib/metrics/python/ops/
confusion_matrix_ops.py
25 def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
28 return cm.confusion_matrix(labels=labels, predictions=predictions,
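
confusion_matrix_ops.py simply forwards to the core confusion_matrix op. A NumPy sketch of what the resulting matrix contains; the function name mirrors the op but the code is illustrative:

import numpy as np

def confusion_matrix(labels, predictions, num_classes=None):
    """cm[i, j] counts examples with true class i predicted as class j."""
    labels = np.asarray(labels)
    predictions = np.asarray(predictions)
    if num_classes is None:
        num_classes = int(max(labels.max(), predictions.max())) + 1
    cm = np.zeros((num_classes, num_classes), dtype=np.int32)
    for t, p in zip(labels, predictions):
        cm[t, p] += 1
    return cm

print(confusion_matrix([1, 2, 4], [2, 2, 4]))
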
/external/autotest/utils/
labellib_unittest.py
44 labels = ['webcam', 'pool:suites']
45 mapping = labellib.LabelsMapping(labels)
46 self.assertEqual(mapping.getlabels(), labels)
49 labels = ['webcam', 'pool:suites', 'pool:party']
50 mapping = labellib.LabelsMapping(labels)
54 labels = ['ohse:tsubame', 'webcam']
55 mapping = labellib.LabelsMapping(labels)
59 labels = ['webcam', 'exec', 'method']
60 mapping = labellib.LabelsMapping(labels)
64 labels
[all...]
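
The labellib test mixes plain labels ('webcam') with key:value labels ('pool:suites'). A hedged sketch of how such labels could be split into the two forms; the real LabelsMapping behavior may differ:

def split_labels(labels):
    """Separate plain labels from 'key:value' labels (illustrative only)."""
    plain, keyval = [], {}
    for label in labels:
        if ":" in label:
            key, _, value = label.partition(":")
            keyval.setdefault(key, []).append(value)
        else:
            plain.append(label)
    return plain, keyval

print(split_labels(['webcam', 'pool:suites', 'pool:party']))
# (['webcam'], {'pool': ['suites', 'party']})
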
/external/tensorflow/tensorflow/contrib/metrics/python/metrics/
classification_test.py
32 labels = array_ops.placeholder(dtypes.int32, shape=[None])
33 acc = classification.accuracy(pred, labels)
36 labels: [1, 1, 0, 0]})
42 labels = array_ops.placeholder(dtypes.bool, shape=[None])
43 acc = classification.accuracy(pred, labels)
46 labels: [1, 1, 0, 0]})
52 labels = array_ops.placeholder(dtypes.int64, shape=[None])
53 acc = classification.accuracy(pred, labels)
56 labels: [1, 1, 0, 0]})
62 labels
[all...]
classification.py
29 def accuracy(predictions, labels, weights=None, name=None):
30 """Computes the percentage of times that predictions matches labels.
34 matches 'labels'.
35 labels: the ground truth values, a `Tensor` of any shape and
47 if not (labels.dtype.is_integer or
48 labels.dtype in (dtypes.bool, dtypes.string)):
51 labels.dtype)
52 if not labels.dtype.is_compatible_with(predictions.dtype):
53 raise ValueError('Dtypes of predictions and labels should match. '
54 'Given: predictions (%r) and labels (
[all...]
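
classification.py's accuracy is documented as the percentage of positions where `predictions` matches `labels`, with compatible dtypes. A NumPy sketch of that computation with optional weights (names are illustrative):

import numpy as np

def accuracy(predictions, labels, weights=None):
    """Weighted fraction of elements where predictions == labels."""
    matches = (np.asarray(predictions) == np.asarray(labels)).astype(float)
    if weights is None:
        return matches.mean()
    weights = np.asarray(weights, dtype=float)
    return (matches * weights).sum() / weights.sum()

print(accuracy([1, 1, 0, 0], [1, 1, 0, 1]))                # 0.75
print(accuracy([1, 1, 0, 0], [1, 1, 0, 1], [1, 1, 1, 0]))  # 1.0
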
/external/autotest/server/hosts/
afe_store.py
54 return host_info.HostInfo(host.labels, host.attributes)
64 # copy of HostInfo from the AFE and then add/remove labels / attribtes
66 # parallel, we'll end up with corrupted labels / attributes.
69 list(set(old_info.labels) - set(new_info.labels)))
71 list(set(new_info.labels) - set(old_info.labels)))
75 def _remove_labels_on_afe(self, labels):
76 """Requests the AFE to remove the given labels.
78 @param labels
[all...]
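
afe_store.py computes labels to remove as old minus new and labels to add as new minus old. A small sketch of that set-difference sync; the AFE RPC calls are omitted and the helper name is hypothetical:

def diff_labels(old_labels, new_labels):
    """Return (labels_to_remove, labels_to_add) to move a host from old to new."""
    old, new = set(old_labels), set(new_labels)
    return sorted(old - new), sorted(new - old)

to_remove, to_add = diff_labels(['webcam', 'pool:suites'], ['webcam', 'pool:party'])
print(to_remove)  # ['pool:suites']
print(to_add)     # ['pool:party']
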
/external/autotest/contrib/
print_host_labels.py
16 labels = host.get_labels() variable
18 print labels
/external/tensorflow/tensorflow/contrib/learn/python/learn/learn_io/
pandas_io.py
114 def extract_pandas_labels(labels):
115 """Extract data from pandas.DataFrame for labels.
118 labels: `pandas.DataFrame` or `pandas.Series` containing one column of
119 labels to be extracted.
122 A numpy `ndarray` of labels from the DataFrame.
128 if isinstance(labels,
130 if len(labels.columns) > 1:
131 raise ValueError('Only one column for labels is allowed.')
133 bad_data = [column for column in labels
134 if labels[colum
[all...]
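
pandas_io.extract_pandas_labels accepts a single-column DataFrame (or a Series) and rejects multi-column input. A hedged pandas sketch of that check, not the contrib implementation:

import numpy as np
import pandas as pd

def extract_pandas_labels(labels):
    """Return labels as an ndarray; only one DataFrame column is allowed."""
    if isinstance(labels, pd.DataFrame):
        if len(labels.columns) > 1:
            raise ValueError('Only one column for labels is allowed.')
        return labels.iloc[:, 0].values
    if isinstance(labels, pd.Series):
        return labels.values
    return np.asarray(labels)

print(extract_pandas_labels(pd.DataFrame({'y': [0, 1, 1]})))  # [0 1 1]
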
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
logistic_regressor.py
38 `(features, labels, mode) -> (predictions, loss, train_op)`.
45 def _model_fn(features, labels, mode, params):
49 predictions, loss, train_op = model_fn(features, labels, mode)
52 labels=labels,
100 `(features, labels, mode) -> (predictions, loss, train_op)`.
109 labels which are the output of `input_fn` and
110 returns features and labels which will be fed
124 def _make_logistic_eval_metric_ops(labels, predictions, thresholds):
128 labels
[all...]
head.py
69 def _my_dnn_model_fn(features, labels, mode, params, config=None):
83 labels=labels,
97 labels=labels,
111 labels=labels,
142 labels=None,
156 labels: Labels `Tensor`, or `dict` of same.
194 label_dimension: Number of regression labels pe
[all...]
/external/python/cpython3/Lib/encodings/
idna.py
162 labels = result.split(b'.')
163 for label in labels[:-1]:
166 if len(labels[-1]) >= 64:
171 labels = dots.split(input)
172 if labels and not labels[-1]:
174 del labels[-1]
177 for label in labels:
204 labels = input.split(b".")
206 if labels an
[all...]
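
idna.py splits the input into dot-separated labels and rejects any label of 64 bytes or more. A plain-Python sketch of that length validation; the codec's full ToASCII/ToUnicode handling is omitted:

def check_domain_labels(domain):
    """Split a domain into labels and enforce the 1..63 byte length rule."""
    labels = domain.split(".")
    if labels and not labels[-1]:
        del labels[-1]  # allow a single trailing dot (root label)
    for label in labels:
        if not 0 < len(label) < 64:
            raise ValueError("label too long or empty: %r" % label)
    return labels

print(check_domain_labels("www.example.com."))  # ['www', 'example', 'com']
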
/external/tensorflow/tensorflow/contrib/learn/python/learn/ops/
losses_ops.py
32 def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
35 [tensor_in, labels]):
37 if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
39 return predictions, losses.mean_squared_error(labels, predictions)
45 labels,
55 This function requires labels to be passed in one-hot encoding.
59 labels: Tensor, [batch_size, n_classes], one-hot labels of the output
71 with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
75 return nn.softmax(logits), losses.softmax_cross_entropy(labels, logit
[all...]
/external/tensorflow/tensorflow/contrib/learn/python/learn/
metric_spec_test.py
35 def _fn0(predictions, labels, weights=None):
37 self.assertEqual("l1_value", labels)
93 def _fn(labels):
94 self.assertEqual(labels_, labels)
106 def _fn(labels, **kwargs):
107 self.assertEqual(labels_, labels)
120 def _fn(labels, predictions_by_another_name):
122 self.assertEqual(labels_, labels)
135 def _fn(predictions_by_another_name, labels):
137 self.assertEqual(labels_, labels)
[all...]
/external/tensorflow/tensorflow/contrib/losses/python/losses/
loss_ops.py
264 def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
277 labels: The ground truth output tensor, same dimensions as 'predictions'.
286 ValueError: If the shape of `predictions` doesn't match that of `labels` or
290 [predictions, labels, weights]) as scope:
291 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
293 labels = math_ops.to_float(labels)
294 losses = math_ops.abs(math_ops.subtract(predictions, labels))
300 "of the predictions and labels arguments has been changed.")
313 If `label_smoothing` is nonzero, smooth the labels toward
[all...]
/external/tensorflow/tensorflow/python/ops/losses/
losses_impl.py
219 labels, predictions, weights=1.0, scope=None,
233 labels: The ground truth output tensor, same dimensions as 'predictions'.
236 `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
244 shape as `labels`; otherwise, it is scalar.
248 `labels` or if the shape of `weights` is invalid or if `labels`
251 if labels is None:
252 raise ValueError("labels must not be None.")
256 (predictions, labels, weight
[all...]

Completed in 544 milliseconds
