Searched defs:GPU (Results 1 - 5 of 5) sorted by relevance

/external/tensorflow/tensorflow/python/saved_model/
tag_constants.py
37 GPU = "gpu" variable
38 tf_export("saved_model.tag_constants.GPU").export_constant(__name__, "GPU")
47 "GPU",
/external/llvm/lib/Target/AMDGPU/
AMDGPUSubtarget.cpp
38 StringRef GPU, StringRef FS) {
53 ParseSubtargetFeatures(GPU, FullFS);
70 AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS, argument
72 : AMDGPUGenSubtargetInfo(TT, GPU, FS),
119 InstrItins(getInstrItineraryForCPU(GPU)) {
120 initializeSubtargetDependencies(TT, GPU, FS);
181 R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS, argument
183 AMDGPUSubtarget(TT, GPU, FS, TM),
188 SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS, argument
190 AMDGPUSubtarget(TT, GPU, F
37 initializeSubtargetDependencies(const Triple &TT, StringRef GPU, StringRef FS) argument
[all...]
AMDGPUTargetMachine.cpp
125 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) { argument
126 if (!GPU.empty())
127 return GPU;
129 // HSA only supports CI+, so change the default GPU to a CI for HSA.
185 StringRef GPU = getGPUName(F); local
188 SmallString<128> SubtargetKey(GPU);
197 I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
226 StringRef GPU = getGPUName(F); local
229 SmallString<128> SubtargetKey(GPU);
238 I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, F
[all...]
/external/tensorflow/tensorflow/core/common_runtime/gpu/
process_state.h
46 enum MemLoc { CPU, GPU }; enumerator in enum:tensorflow::ProcessState::MemDesc::MemLoc
59 // Query whether any GPU device has been created so far.
65 // Set the flag to indicate a GPU device has been created.
79 // Returns the one GPU allocator used for the indexed GPU.
80 // Note that this is a system GPU index, not (necessarily) a brain
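
The MemLoc enumerator and the query/set methods described in the comments above track GPU device and allocator state inside the C++ runtime. The closest user-facing check for whether any GPU is visible is listing physical devices from Python; a minimal sketch assuming the TF 2.x tf.config API:

    import tensorflow as tf

    # Physical GPU devices visible to the TensorFlow runtime, if any.
    gpus = tf.config.list_physical_devices("GPU")
    print("GPU available:", len(gpus) > 0)
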
/external/tensorflow/tensorflow/python/eager/
benchmarks_test.py
20 To run GPU benchmarks:
50 GPU = "/device:GPU:0" variable
87 # call func to maybe warm up the GPU
105 if device == GPU:
106 # Warmup the GPU
131 self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
138 GPU)
141 # int32's are kept on host memory even when executing on GPU.
144 self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
[all...]
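
The GPU constant in this test is a plain device string, and the warm-up and int32 remarks above come down to standard device placement. A minimal sketch of that pattern, assuming TF eager execution and illustrative tensor values:

    import tensorflow as tf

    GPU = "/device:GPU:0"  # same device string as in benchmarks_test.py

    with tf.device(GPU):
        # float32 constants are placed on the GPU; running one op warms it up.
        x = tf.constant([[3.0]])
        y = tf.matmul(x, x)

    # int32 tensors are kept in host memory even when executing on GPU.
    i = tf.constant([[3]], dtype=tf.int32)
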

Completed in 350 milliseconds