Searched refs:grad (Results 1 - 25 of 186) sorted by relevance


/external/tensorflow/tensorflow/core/kernels/
typed_conditional_accumulator_base.h
48 * grad: Gradient tensor to be added to the accumulator.
55 GradientTensorType* grad = nullptr;
56 bool is_valid = GetAndValidateTensorInputForApplyGrad(ctx, &grad);
59 AddToAccumGradFunction(ctx, grad);
61 AllocateAndAssignToAccumGradFunction(ctx, grad);
65 CleanUpGradTensor(grad);
75 OpKernelContext* ctx, GradientTensorType* grad) = 0;
78 GradientTensorType* grad) = 0;
/external/tensorflow/tensorflow/python/ops/
math_grad.py
39 def _SumGrad(op, grad):
49 grad = array_ops.reshape(grad, [1] * rank)
55 return [array_ops.tile(grad, input_shape), None]
63 grad = array_ops.reshape(grad, output_shape_kept_dims)
64 return [array_ops.tile(grad, tile_scaling), None]
67 def _MinOrMaxGrad(op, grad):
73 grad = array_ops.reshape(grad, output_shape_kept_dim
[all...]
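Note: the _SumGrad hits above implement the usual broadcast trick for reductions: the incoming gradient has the shape of the reduced output, so it is reshaped to restore the reduced axes as size-1 dims and then tiled back to the input shape. A NumPy re-derivation of that logic (a sketch only, not TensorFlow's implementation; sum_grad is a hypothetical helper):

    import numpy as np

    def sum_grad(input_shape, axis, grad):
        # Reshape grad so the reduced axis reappears with size 1 ...
        kept_dims = list(input_shape)
        kept_dims[axis] = 1
        grad = np.reshape(grad, kept_dims)
        # ... then tile it so every input element receives the output gradient.
        tiling = [1] * len(input_shape)
        tiling[axis] = input_shape[axis]
        return np.tile(grad, tiling)

    g = sum_grad((2, 3), axis=1, grad=np.ones(2))
    assert g.shape == (2, 3)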
control_flow_grad.py
35 def _SwitchGrad(op, *grad):
54 if grad[1] is not None:
56 control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],
60 elif grad[0] is not None:
62 # the Exit branch, which is grad[0]. grad[1] is empty at this point.
63 # Use grad[0] for both inputs to merge for now, but update the second
65 merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
74 zero_grad = grad[
[all...]
tensor_array_grad.py
81 def _TensorArrayReadGrad(op, grad):
86 grad: Gradient `Tensor` to TensorArrayRead.
90 force the write of `grad` to the gradient `TensorArray`.
92 # Note: the forward flow dependency in the call to grad() is necessary for
101 grad_source = _GetGradSource(grad)
104 .grad(source=grad_source, flow=flow))
105 w_g = g.write(index, grad)
120 A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
130 .grad(source=grad_source, flow=flow))
131 grad
[all...]
manip_grad.py
26 def _RollGrad(op, grad):
30 roll_grad = manip_ops.roll(grad, -shift, axis)
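Note: _RollGrad is nearly a one-liner because roll is a permutation; the gradient simply rolls the incoming gradient back by the negated shift (the full function presumably also returns None gradients for the shift and axis inputs). A NumPy sanity check of that identity (illustrative only):

    import numpy as np

    x = np.arange(5.0)
    y = np.roll(x, 2)              # y[2] == x[0]
    grad_y = np.array([0., 0., 1., 0., 0.])
    grad_x = np.roll(grad_y, -2)   # route the gradient back through the permutation
    assert grad_x[0] == 1.0        # x[0] receives the gradient that landed on y[2]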
array_grad.py
37 def _PackGrad(op, grad):
39 return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis"))
48 def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
53 grad: `Tensor` or `IndexedSlices` representing the gradients with respect
100 # Degenerate concatenation, just return grad.
102 return grad + [None] if end_value_index <= dim_index else [None] + grad
108 if isinstance(grad, ops.Tensor):
117 out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
126 grad_context = control_flow_util.GetOutputContext(grad
[all...]
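Note: _ConcatGradHelper's dense branch (the array_ops.split call at line 117 above) reflects that concat's gradient is just the incoming gradient split back into pieces sized like the original inputs. A NumPy sketch:

    import numpy as np

    a, b = np.ones((2, 3)), np.ones((2, 5))
    grad = np.arange(16.0).reshape(2, 8)   # upstream gradient of concat([a, b], axis=1)
    sizes = [a.shape[1], b.shape[1]]
    # Split grad at the cumulative boundaries of the original inputs.
    grad_a, grad_b = np.split(grad, np.cumsum(sizes)[:-1], axis=1)
    assert grad_a.shape == a.shape and grad_b.shape == b.shape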
spectral_grad.py
29 def _FFTSizeForGrad(grad, rank):
30 return math_ops.reduce_prod(array_ops.shape(grad)[-rank:])
34 def _FFTGrad(_, grad):
35 size = math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
36 return spectral_ops.ifft(grad) * math_ops.complex(size, 0.)
40 def _IFFTGrad(_, grad):
41 rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
42 return spectral_ops.fft(grad) * math_ops.complex(rsize, 0.)
46 def _FFT2DGrad(_, grad):
47 size = math_ops.cast(_FFTSizeForGrad(grad,
[all...]
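Note: the size factors in _FFTGrad/_IFFTGrad come from the unnormalized DFT convention: ifft carries a 1/N factor, so the adjoint of fft is N * ifft (and the adjoint of ifft is fft / N). A quick NumPy check of that scaling (a sanity sketch, not the registered gradient itself):

    import numpy as np

    n = 8
    v = np.random.randn(n) + 1j * np.random.randn(n)
    dft = np.fft.fft(np.eye(n))                   # DFT matrix A, with A @ x == fft(x)
    np.testing.assert_allclose(dft.conj().T @ v,  # adjoint A^H applied to v
                               n * np.fft.ifft(v), atol=1e-9)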
image_grad.py
28 def _ResizeNearestNeighborGrad(op, grad):
33 grad: The tensor representing the gradient w.r.t. the output.
46 grad,
54 def _ResizeBilinearGrad(op, grad):
59 grad: The tensor representing the gradient w.r.t. the output.
66 grad, op.inputs[0], align_corners=op.get_attr("align_corners"))
72 def _ResizeBicubicGrad(op, grad):
77 grad: The tensor representing the gradient w.r.t. the output.
87 grad, op.inputs[0], align_corners=op.get_attr("align_corners"))
93 def _CropAndResizeGrad(op, grad)
[all...]
nn_grad.py
34 def _Conv2DBackpropInputGrad(op, grad):
39 grad: the tensor representing the gradient w.r.t. the output
47 grad,
56 grad,
67 def _Conv2DBackpropFilterGrad(op, grad):
71 grad,
80 grad,
90 def _Conv3DGrad(op, grad):
96 grad,
103 grad,
[all...]
/external/tensorflow/tensorflow/contrib/opt/python/training/
multitask_optimizer_wrapper.py
34 def _is_all_zeros(grad):
35 all_zeros = math_ops.equal(math_ops.count_nonzero(grad), 0)
41 def wrapper(self, grad, *args, **kwargs): # pylint: disable=unused-argument
42 all_zeros = _is_all_zeros(grad)
44 lambda: fn(grad, *args, **kwargs))
127 def _replace_nonexisting_grad(grad):
128 if grad is None:
129 return grad
130 all_zeros = _is_all_zeros(grad)
133 lambda: array_ops.zeros([], dtype=dtypes.as_dtype(grad
[all...]
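Note: the wrapper above gates each optimizer update on whether the gradient is identically zero, which is how it avoids disturbing the slots of tasks that produced no signal this step. The pattern reduced to its core (tf.cond, tf.equal and tf.math.count_nonzero are real TF ops; apply_if_nonzero and its wiring are illustrative):

    import tensorflow as tf

    def apply_if_nonzero(grad, apply_fn, noop_fn):
        # Skip the update entirely when every entry of grad is zero;
        # both branches must return the same structure of outputs.
        all_zeros = tf.equal(tf.math.count_nonzero(grad), 0)
        return tf.cond(all_zeros, noop_fn, lambda: apply_fn(grad))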
lazy_adam_optimizer.py
49 def _apply_sparse(self, grad, var):
61 m_t = state_ops.scatter_update(m, grad.indices,
62 beta1_t * array_ops.gather(m, grad.indices) +
63 (1 - beta1_t) * grad.values,
68 v_t = state_ops.scatter_update(v, grad.indices,
69 beta2_t * array_ops.gather(v, grad.indices) +
70 (1 - beta2_t) * math_ops.square(grad.values),
74 m_t_slice = array_ops.gather(m_t, grad.indices)
75 v_t_slice = array_ops.gather(v_t, grad.indices)
77 var_update = state_ops.scatter_sub(var, grad
[all...]
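Note: the scatter_update calls above are the "lazy" part: only rows named by grad.indices have their m and v slots decayed and updated, whereas dense Adam touches every row each step. A NumPy rendering of those two moment updates (illustrative; the variable names mirror the snippet):

    import numpy as np

    beta1, beta2 = 0.9, 0.999
    m = np.zeros((4, 2)); v = np.zeros((4, 2))
    indices = np.array([1, 3])     # only these rows received gradient this step
    values = np.ones((2, 2))       # grad.values for those rows
    m[indices] = beta1 * m[indices] + (1 - beta1) * values
    v[indices] = beta2 * v[indices] + (1 - beta2) * values ** 2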
nadam_optimizer.py
34 def _apply_dense(self, grad, var):
48 grad,
52 def _resource_apply_dense(self, grad, var):
60 math_ops.cast(beta1_power, grad.dtype.base_dtype),
61 math_ops.cast(beta2_power, grad.dtype.base_dtype),
62 math_ops.cast(self._lr_t, grad.dtype.base_dtype),
63 math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
64 math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
65 math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
66 grad,
[all...]
/external/tensorflow/tensorflow/python/training/
gradient_descent.py
47 def _apply_dense(self, grad, var):
51 grad,
54 def _resource_apply_dense(self, grad, handle):
57 grad.dtype.base_dtype),
58 grad, use_locking=self._use_locking)
60 def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
62 handle.handle, indices, -grad * self._learning_rate)
64 def _apply_sparse_duplicate_indices(self, grad, var):
66 grad.values *
68 grad
[all...]
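Note: _apply_dense above defers to a fused training_ops kernel, but the math it implements is plain SGD. Spelled out by hand:

    import numpy as np

    var = np.array([1.0, 2.0])
    grad = np.array([0.1, -0.2])
    learning_rate = 0.5
    var -= learning_rate * grad    # var <- var - lr * grad
    np.testing.assert_allclose(var, [0.95, 2.1])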
proximal_gradient_descent.py
61 def _apply_dense(self, grad, var):
67 grad,
70 def _resource_apply_dense(self, grad, var):
76 grad,
79 def _apply_sparse(self, grad, var):
85 grad.values,
86 grad.indices,
89 def _resource_apply_sparse(self, grad, var, indices):
92 math_ops.cast(self._learning_rate_tensor, grad.dtype),
93 math_ops.cast(self._l1_regularization_strength_tensor, grad
[all...]
adagrad.py
84 def _apply_dense(self, grad, var):
90 grad,
93 def _resource_apply_dense(self, grad, var):
98 math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
99 grad,
102 def _apply_sparse(self, grad, var):
108 grad.values,
109 grad.indices,
112 def _resource_apply_sparse(self, grad, var, indices):
117 math_ops.cast(self._learning_rate_tensor, grad
[all...]
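Note: the adagrad.py hits wrap this per-coordinate update: squared gradients accumulate, and the effective step size shrinks as the accumulator grows. A NumPy sketch (0.1 mirrors the optimizer's documented default initial_accumulator_value; treat the exact constant as an assumption):

    import numpy as np

    lr = 0.1
    accum = np.full(2, 0.1)        # initial accumulator
    var = np.zeros(2)
    grad = np.array([1.0, 0.5])
    accum += grad ** 2             # accumulate squared gradients
    var -= lr * grad / np.sqrt(accum)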
adadelta.py
45 to better conditioning the grad update.
70 def _apply_dense(self, grad, var):
80 grad,
83 def _resource_apply_dense(self, grad, var):
90 math_ops.cast(self._lr_t, grad.dtype.base_dtype),
91 math_ops.cast(self._rho_t, grad.dtype.base_dtype),
92 math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
93 grad,
96 def _apply_sparse(self, grad, var):
106 grad
[all...]
rmsprop.py
130 def _apply_dense(self, grad, var):
144 grad,
155 grad,
158 def _resource_apply_dense(self, grad, var):
168 math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
169 math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
170 math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
171 math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype),
172 grad,
179 math_ops.cast(self._learning_rate_tensor, grad
[all...]
momentum.py
90 def _apply_dense(self, grad, var):
95 grad,
100 def _resource_apply_dense(self, grad, var):
104 math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
105 grad,
106 math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
110 def _apply_sparse(self, grad, var):
115 grad.values, grad.indices,
120 def _resource_apply_sparse(self, grad, va
[all...]
proximal_adagrad.py
88 def _apply_dense(self, grad, var):
94 grad, use_locking=self._use_locking)
96 def _resource_apply_dense(self, grad, var):
102 grad, use_locking=self._use_locking)
104 def _apply_sparse(self, grad, var):
110 grad.values, grad.indices,
113 def _resource_apply_sparse(self, grad, var, indices):
117 math_ops.cast(self._learning_rate_tensor, grad.dtype),
118 math_ops.cast(self._l1_regularization_strength_tensor, grad
[all...]
ftrl.py
143 def _apply_dense(self, grad, var):
151 grad,
164 grad,
175 def _resource_apply_dense(self, grad, var):
183 grad,
196 grad,
207 def _apply_sparse(self, grad, var):
215 grad.values,
216 grad.indices,
229 grad
[all...]
/external/skia/tests/
ShaderOpacityTest.cpp
62 auto grad = SkGradientShader::MakeLinear(pts, colors, pos, count, mode);
63 REPORTER_ASSERT(reporter, grad);
64 REPORTER_ASSERT(reporter, grad->isOpaque());
69 grad = SkGradientShader::MakeLinear(pts, colors, pos, count, mode);
70 REPORTER_ASSERT(reporter, grad);
71 REPORTER_ASSERT(reporter, !grad->isOpaque());
76 grad = SkGradientShader::MakeLinear(pts, colors, pos, count, mode);
77 REPORTER_ASSERT(reporter, grad);
78 REPORTER_ASSERT(reporter, !grad->isOpaque());
83 grad
[all...]
/external/skqp/tests/
ShaderOpacityTest.cpp
62 auto grad = SkGradientShader::MakeLinear(pts, colors, pos, count, mode);
63 REPORTER_ASSERT(reporter, grad);
64 REPORTER_ASSERT(reporter, grad->isOpaque());
69 grad = SkGradientShader::MakeLinear(pts, colors, pos, count, mode);
70 REPORTER_ASSERT(reporter, grad);
71 REPORTER_ASSERT(reporter, !grad->isOpaque());
76 grad = SkGradientShader::MakeLinear(pts, colors, pos, count, mode);
77 REPORTER_ASSERT(reporter, grad);
78 REPORTER_ASSERT(reporter, !grad->isOpaque());
83 grad
[all...]
/external/tensorflow/tensorflow/contrib/training/python/training/
training.py
281 for grad, var in grads_and_vars:
282 if grad is not None:
283 if isinstance(grad, ops.IndexedSlices):
284 grad_values = grad.values
286 grad_values = grad
309 for grad, var in gradients_to_variables:
310 if grad is not None:
311 if isinstance(grad, ops.IndexedSlices):
312 tmp = clip_ops.clip_by_norm(grad.values, max_norm)
313 grad
[all...]
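Note: the clipping loop above special-cases IndexedSlices: only grad.values is clipped and then repacked, while dense gradients are clipped whole. The norm-clipping itself is simple; a NumPy stand-in for clip_ops.clip_by_norm (hypothetical helper, same semantics for dense tensors):

    import numpy as np

    def clip_by_norm(t, max_norm):
        # Rescale t onto the max_norm ball only when it lies outside it.
        norm = np.linalg.norm(t)
        return t if norm <= max_norm else t * (max_norm / norm)

    clipped = clip_by_norm(np.array([3.0, 4.0]), 1.0)   # norm 5 -> norm 1
    np.testing.assert_allclose(np.linalg.norm(clipped), 1.0)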
/external/tensorflow/tensorflow/core/ops/
functional_grad.cc
34 auto grad = FDH::FunctionRef("SymbolicGradient",
53 {{"g", grad}, {"T", "$T"}, {"K", k}}}});
/external/tensorflow/tensorflow/examples/adding_an_op/
zero_out_grad_2.py
28 def _zero_out_grad(op, grad):
34 grad: Gradient with respect to the output of the `zero_out` op.
42 first_grad = array_ops.reshape(grad, [-1])[0]
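Note: the zero_out example op keeps only the first element of its input, so only that element can receive gradient; line 42 above extracts it, and the registered gradient presumably scatters it back into a zero tensor shaped like the input. A NumPy sketch of that construction (zero_out_grad is a hypothetical stand-in for the registered function):

    import numpy as np

    def zero_out_grad(input_shape, grad):
        first_grad = np.reshape(grad, [-1])[0]    # gradient for input element 0
        g = np.zeros(int(np.prod(input_shape)))   # every other element gets zero
        g[0] = first_grad
        return g.reshape(input_shape)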

