/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_H_

#include <functional>  // for std::function (StatusCallback)
#include <string>
#include <vector>

#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/device_attributes.pb.h"  // for DeviceLocality
#include "tensorflow/core/lib/core/status.h"

namespace tensorflow {
typedef std::function<void(const Status&)> StatusCallback;

class ChannelCache;
class StepStats;

class WorkerCacheInterface {
 public:
  virtual ~WorkerCacheInterface() {}

  // Updates *workers with strings naming the remote worker tasks to
  // which open channels have been established.
  virtual void ListWorkers(std::vector<string>* workers) const = 0;

  // If "target" names a remote task for which an RPC channel exists
  // or can be constructed, returns a pointer to a WorkerInterface object
  // wrapping that channel.  The returned value must be released by calling
  // `this->ReleaseWorker(target, ret)`; do not delete it directly, since
  // the object may be shared with other callers.
  // TODO(mrry): rename this to GetOrCreateWorker() or something that
  // makes it more obvious that this method returns a potentially
  // shared object.
  virtual WorkerInterface* CreateWorker(const string& target) = 0;

  // Releases a worker previously returned by `this->CreateWorker(target)`.
  //
  // TODO(jeff,sanjay): Consider moving target into WorkerInterface.
  // TODO(jeff,sanjay): Unify all worker-cache impls and factor out a
  //                    per-rpc-subsystem WorkerInterface creator.
  virtual void ReleaseWorker(const string& target, WorkerInterface* worker) {
    // Subclasses may override to reuse worker objects.
    delete worker;
  }
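
  // Illustrative usage sketch only (not part of the interface contract);
  // `cache` and `target` are caller-supplied values:
  //
  //   WorkerInterface* wi = cache->CreateWorker(target);
  //   if (wi != nullptr) {
  //     // ... issue RPCs through `wi` ...
  //     cache->ReleaseWorker(target, wi);  // never `delete wi` directly
  //   }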

  // Sets *locality to the DeviceLocality of the specified remote device
  // within its local environment, using only locally cached data.  Returns
  // true if *locality was set, or false if status data for that device was
  // not available.  Never blocks.
  virtual bool GetDeviceLocalityNonBlocking(const string& device,
                                            DeviceLocality* locality) = 0;

  // Sets *locality to the DeviceLocality of the specified remote device
  // within its local environment.  Invokes `done` with Status::OK() if
  // *locality was set.
  virtual void GetDeviceLocalityAsync(const string& device,
                                      DeviceLocality* locality,
                                      StatusCallback done) = 0;
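
  // Illustrative usage sketch only (not part of the interface contract);
  // `cache` and `device` are caller-supplied, and `locality` must outlive
  // the asynchronous call:
  //
  //   cache->GetDeviceLocalityAsync(device, &locality,
  //                                 [&](const Status& s) {
  //                                   if (s.ok()) { /* read locality */ }
  //                                 });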

  // Starts or stops logging activity.
  virtual void SetLogging(bool active) {}

  // Discards any saved log data.
  virtual void ClearLogs() {}

  // Returns logs for the identified step in *ss.  Retrieval is destructive:
  // any returned data will no longer be stored.
  virtual bool RetrieveLogs(int64 step_id, StepStats* ss) { return false; }
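
  // Illustrative usage sketch only (not part of the interface contract);
  // `cache` and `step_id` are caller-supplied:
  //
  //   cache->SetLogging(true);
  //   // ... run one or more steps ...
  //   StepStats ss;
  //   if (cache->RetrieveLogs(step_id, &ss)) { /* inspect ss */ }
  //   cache->SetLogging(false);
  //   cache->ClearLogs();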
};
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_H_