/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_KERNELS_TRAINING_OP_HELPERS_H_
#define TENSORFLOW_KERNELS_TRAINING_OP_HELPERS_H_

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/dense_update_functor.h"
#include "tensorflow/core/kernels/variable_ops.h"

namespace tensorflow {

// Returns the mutex guarding the variable passed at input index `input`,
// for both ref variables and resource variables.
mutex* GetTrainingVariableMutex(OpKernelContext* ctx, int input);

// If `do_lock` is true, acquires the mutexes of the variables at `input_ids`
// in a consistent (address) order, so that kernels taking several variable
// inputs do not deadlock, and returns the held locks.
std::vector<mutex_lock> MaybeLockVariableInputMutexesInOrder(
    OpKernelContext* ctx, bool do_lock, const std::vector<int>& input_ids);

// Forwards the ref input at index `input` to the ref output at index `output`
// when the input is a reference type; a no-op for resource variables.
void MaybeForwardRefInputToRefOutput(OpKernelContext* ctx, int input,
                                     int output);

// This is for use with ResourceVariables to ensure *tensor has a
// reference count of 1 before you update it.
// REQUIRES: If you pass in variable->tensor(), *variable->mu() must be held.
template <typename Device, typename T>
Status PrepareToUpdateVariable(OpKernelContext* ctx, Tensor* tensor) {
  if (!tensor->RefCountIsOne()) {
    // Tensor's buffer is in use by some read, so we need to copy before
    // updating.
    PersistentTensor unused;
    Tensor* tmp;
    AllocatorAttributes attr;
    attr.set_gpu_compatible(true);
    attr.set_nic_compatible(true);
    TF_RETURN_IF_ERROR(ctx->allocate_persistent(
        tensor->dtype(), tensor->shape(), &unused, &tmp, attr));
    functor::DenseUpdate<Device, T, ASSIGN> copy_functor;
    copy_functor(ctx->eigen_device<Device>(), tmp->flat<T>(),
                 const_cast<const Tensor*>(tensor)->flat<T>());
    *tensor = *tmp;
  }
  return Status::OK();
}
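
// A minimal usage sketch (illustrative only): a resource-variable update
// kernel would typically hold the variable's mutex and call
// PrepareToUpdateVariable() before mutating the buffer in place.  The
// surrounding kernel and the `delta` tensor below are hypothetical.
//
//   Var* var = nullptr;
//   TF_RETURN_IF_ERROR(LookupResource(ctx, HandleFromInput(ctx, 0), &var));
//   core::ScopedUnref unref(var);
//   mutex_lock lock(*var->mu());
//   TF_RETURN_IF_ERROR(
//       PrepareToUpdateVariable<Device, T>(ctx, var->tensor()));
//   var->tensor()->flat<T>().device(ctx->eigen_device<Device>()) +=
//       delta.flat<T>();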

// This gives you `*out`, a tensor you can update, corresponding to a
// variable passed as input index `input`.  This handles the
// differences between reference and resource variables.  For resource
// variables, we ensure `*out` has a reference count of 1 (using
// PrepareToUpdateVariable() to copy if necessary) unless
// sparse && !lock_held, in which case it never copies.
template <typename Device, typename T>
Status GetInputTensorFromVariable(OpKernelContext* ctx, int input,
                                  bool lock_held, bool sparse, Tensor* out) {
  if (ctx->input_dtype(input) == DT_RESOURCE) {
    Var* var;
    if (LookupResource(ctx, HandleFromInput(ctx, input), &var).ok()) {
      core::ScopedUnref unref_var(var);
      if (lock_held) {
        TF_RETURN_IF_ERROR(
            PrepareToUpdateVariable<Device, T>(ctx, var->tensor()));
        *out = *var->tensor();
      } else {
        mutex_lock ml(*var->mu());
        if (!sparse) {
          TF_RETURN_IF_ERROR(
              PrepareToUpdateVariable<Device, T>(ctx, var->tensor()));
        }
        *out = *var->tensor();
      }
      return Status::OK();
    } else {
      return errors::Internal("Invalid variable reference.");
    }
  }
  *out = ctx->mutable_input(input, lock_held);
  return Status::OK();
}
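
// A minimal end-to-end sketch (illustrative only) of how a dense training
// kernel's Compute() might combine these helpers.  `use_exclusive_lock_` and
// the input layout (var at index 0, lr at 1, delta at 2) are hypothetical.
//
//   void Compute(OpKernelContext* ctx) override {
//     auto locks =
//         MaybeLockVariableInputMutexesInOrder(ctx, use_exclusive_lock_, {0});
//     Tensor var;
//     OP_REQUIRES_OK(ctx, GetInputTensorFromVariable<Device, T>(
//                             ctx, 0, use_exclusive_lock_, /*sparse=*/false,
//                             &var));
//     const Tensor& lr = ctx->input(1);
//     const Tensor& delta = ctx->input(2);
//     var.flat<T>().device(ctx->eigen_device<Device>()) -=
//         delta.flat<T>() * lr.scalar<T>()();
//     MaybeForwardRefInputToRefOutput(ctx, 0, 0);
//   }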

}  // end namespace tensorflow

#endif  // TENSORFLOW_KERNELS_TRAINING_OP_HELPERS_H_