Searched refs:AllocateRaw (Results 1 - 25 of 44) sorted by relevance

/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_managed_allocator.cc:24 void* GpuManagedAllocator::AllocateRaw(size_t alignment, size_t num_bytes) { [function in class tensorflow::GpuManagedAllocator]
gpu_managed_allocator.h:30 void* AllocateRaw(size_t alignment, size_t num_bytes) override;
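
Note: the two matches above are the definition and declaration of the CUDA unified-memory allocator. A minimal standalone sketch of the same override pattern follows, assuming the CUDA toolkit is on the include path; the Allocator base class here is a hand-rolled stand-in, not the real tensorflow::Allocator, and ManagedAllocatorSketch is an invented name:

    #include <cstddef>
    #include <cuda_runtime.h>

    // Stand-in for tensorflow::Allocator; only the two raw entry points.
    class Allocator {
     public:
      virtual ~Allocator() {}
      virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0;
      virtual void DeallocateRaw(void* ptr) = 0;
    };

    // Sketch of a unified-memory allocator in the style of GpuManagedAllocator.
    class ManagedAllocatorSketch : public Allocator {
     public:
      void* AllocateRaw(size_t alignment, size_t num_bytes) override {
        void* ptr = nullptr;
        // Unified memory is aligned suitably for any device type, so
        // `alignment` is assumed satisfied here; a production allocator
        // would verify it rather than assume.
        if (cudaMallocManaged(&ptr, num_bytes) != cudaSuccess) return nullptr;
        return ptr;
      }
      void DeallocateRaw(void* ptr) override { cudaFree(ptr); }
    };

cudaMallocManaged returns memory reachable from both host and device, which is why this allocator needs no extra copy hooks.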
pool_allocator_test.cc:38 EXPECT_EQ(nullptr, pool.AllocateRaw(4 /*alignment*/, 0 /*num_bytes*/));
63 void* p0 = pool.AllocateRaw(4, 0);
64 void* p4 = pool.AllocateRaw(4, 4);
65 void* p12 = pool.AllocateRaw(4, 12);
90 void* p = pool.AllocateRaw(alignment, 111);
106 void* p = pool.AllocateRaw(4, 64 << i);
118 void* p = pool.AllocateRaw(4, 64 << i);
137 void* p1_16 = pool.AllocateRaw(4, 16);
144 void* p2_16 = pool.AllocateRaw(4, 16); // Get it again.
153 void* p3_4 = pool.AllocateRaw(
[additional matches in this file omitted]
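
The pool tests above pin down two pieces of the AllocateRaw contract: a zero-byte request returns nullptr (line 38), and the returned pointer honors the requested alignment (line 90). A hedged check of the same contract against a throwaway allocator; ToyAllocator is invented here purely so the assertions compile, and the pooling behavior itself is elided:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Invented for illustration; not a real pool, just enough for the contract.
    struct ToyAllocator {
      void* AllocateRaw(size_t alignment, size_t num_bytes) {
        if (num_bytes == 0) return nullptr;  // zero-byte requests yield nullptr
        // std::aligned_alloc requires size to be a multiple of alignment.
        size_t rounded = (num_bytes + alignment - 1) / alignment * alignment;
        return std::aligned_alloc(alignment, rounded);
      }
      void DeallocateRaw(void* ptr) { std::free(ptr); }
    };

    int main() {
      ToyAllocator pool;
      assert(pool.AllocateRaw(4, 0) == nullptr);    // as in line 38 above
      void* p = pool.AllocateRaw(64, 111);          // as in line 90 above
      assert(reinterpret_cast<std::uintptr_t>(p) % 64 == 0);
      pool.DeallocateRaw(p);
    }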
gpu_cudamalloc_allocator.h:38 void* AllocateRaw(size_t alignment, size_t num_bytes) override;
gpu_bfc_allocator_test.cc:55 void* raw = a.AllocateRaw(1, s);
88 void* raw = a.AllocateRaw(1, size);
105 void* out_of_memory_ptr = a.AllocateRaw(1, (1 << 30) + 1);
112 void* raw = a.AllocateRaw(1, size);
219 void* raw = a.AllocateRaw(1, size);
239 void* raw = a.AllocateRaw(1, size);
269 void* amem = a.AllocateRaw(1, 1);
270 void* bmem = b.AllocateRaw(1, 1 << 30);
285 void* p = a.AllocateRaw(1, bytes);
307 void* p = a.AllocateRaw(
[additional matches in this file omitted]
gpu_cudamalloc_allocator.cc:38 void* GPUcudaMallocAllocator::AllocateRaw(size_t alignment, size_t num_bytes) { [function in class tensorflow::GPUcudaMallocAllocator]
gpu_debug_allocator.h:40 void* AllocateRaw(size_t alignment, size_t num_bytes) override;
72 void* AllocateRaw(size_t alignment, size_t num_bytes) override;
gpu_allocator_retry_test.cc:37 void* AllocateRaw(size_t alignment, size_t num_bytes) { [function in class tensorflow::(anonymous namespace)::FakeAllocator]
38 return retry_.AllocateRaw(
133 ptr = alloc_->AllocateRaw(16, 1);
gpu_debug_allocator.cc:86 void* GPUDebugAllocator::AllocateRaw(size_t alignment, size_t num_bytes) { [function in class tensorflow::GPUDebugAllocator]
89 void* allocated_ptr = base_allocator_->AllocateRaw(alignment, num_bytes);
169 void* GPUNanResetAllocator::AllocateRaw(size_t alignment, size_t num_bytes) { [function in class tensorflow::GPUNanResetAllocator]
170 void* allocated_ptr = base_allocator_->AllocateRaw(alignment, num_bytes);
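
GPUDebugAllocator and GPUNanResetAllocator (lines 86 and 169 above) are both decorators: each forwards AllocateRaw to a wrapped base_allocator_ and adds bookkeeping around the result. A pared-down sketch of the pattern; the sentinel-byte fill stands in for the guard words and NaN fills the real classes write, and it assumes host-visible memory:

    #include <cstddef>
    #include <cstring>

    class Allocator {
     public:
      virtual ~Allocator() {}
      virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0;
      virtual void DeallocateRaw(void* ptr) = 0;
    };

    // Decorator: delegate to the base allocator, then mark the fresh buffer
    // so use-before-initialize bugs stand out. 0xAB is an arbitrary sentinel.
    class DebugAllocatorSketch : public Allocator {
     public:
      explicit DebugAllocatorSketch(Allocator* base) : base_(base) {}
      void* AllocateRaw(size_t alignment, size_t num_bytes) override {
        void* ptr = base_->AllocateRaw(alignment, num_bytes);
        if (ptr != nullptr) std::memset(ptr, 0xAB, num_bytes);
        return ptr;
      }
      void DeallocateRaw(void* ptr) override { base_->DeallocateRaw(ptr); }
     private:
      Allocator* base_;  // not owned
    };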
process_state.h:145 void* AllocateRaw(size_t alignment, size_t num_bytes) override {
146 void* p = a_->AllocateRaw(alignment, num_bytes);
/external/tensorflow/tensorflow/core/common_runtime/
visitable_allocator.h:59 void* AllocateRaw(size_t alignment, size_t num_bytes) override {
60 return TrackingAllocator::AllocateRaw(alignment, num_bytes);
allocator_retry.h:38 void* AllocateRaw(std::function<void*(size_t alignment, size_t num_bytes,
mkl_cpu_allocator.h:109 inline void* AllocateRaw(size_t alignment, size_t num_bytes) override {
110 return allocator_->AllocateRaw(alignment, num_bytes);
134 return cpu_allocator()->AllocateRaw(kAlignment, size);
allocator_retry.cc:26 void* AllocatorRetry::AllocateRaw( [function in class tensorflow::AllocatorRetry]
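
allocator_retry.h (line 38 above) shows the other composition style in this directory: AllocatorRetry::AllocateRaw takes the underlying allocation as a std::function and re-invokes it until it succeeds or time runs out, which is how FakeAllocator in gpu_allocator_retry_test.cc routes its own AllocateRaw through retry_. A simplified, self-contained sketch with an invented helper name; the real implementation waits on a condition variable that deallocations signal, rather than polling with a sleep:

    #include <chrono>
    #include <cstddef>
    #include <functional>
    #include <thread>

    // Retry alloc_func until it returns non-null or max_millis elapses.
    void* AllocateRawWithRetry(
        const std::function<void*(size_t alignment, size_t num_bytes)>& alloc_func,
        int max_millis, size_t alignment, size_t num_bytes) {
      const auto deadline = std::chrono::steady_clock::now() +
                            std::chrono::milliseconds(max_millis);
      for (;;) {
        if (void* ptr = alloc_func(alignment, num_bytes)) return ptr;
        if (std::chrono::steady_clock::now() >= deadline) return nullptr;
        // Another thread may free memory in the meantime; back off and retry.
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
      }
    }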
/external/tensorflow/tensorflow/core/framework/
tracking_allocator.h:37 // AllocateRaw by an Op (or work items spawned by the Op) will occur
60 void* AllocateRaw(size_t alignment, size_t num_bytes) override {
61 return AllocateRaw(alignment, num_bytes, AllocationAttributes());
63 void* AllocateRaw(size_t alignment, size_t num_bytes,
100 // the number of calls to AllocateRaw that have not yet been matched
tracking_allocator_test.cc:30 void* AllocateRaw(size_t /*alignment*/, size_t num_bytes) override {
56 void* AllocateRaw(size_t /*alignment*/, size_t num_bytes) override {
74 void* p1 = ta->AllocateRaw(4, 4);
76 void* p2 = ta->AllocateRaw(4, 12);
91 p1 = ta->AllocateRaw(4, 4);
97 p2 = ta->AllocateRaw(4, 12);
123 void* p1 = ta->AllocateRaw(4, 12);
125 void* p2 = ta->AllocateRaw(4, 4);
149 void* p1 = ta->AllocateRaw(4, 12);
allocator.h:87 virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0;
93 virtual void* AllocateRaw(size_t alignment, size_t num_bytes, [function in class tensorflow::Allocator]
97 return AllocateRaw(alignment, num_bytes);
101 // REQUIRES: "ptr" was previously returned by a call to AllocateRaw
124 void* p = AllocateRaw(kAllocatorAlignment, sizeof(T) * num_elements,
175 // when the buffer was returned by AllocateRaw. If non-zero, the
296 void* AllocateRaw(size_t alignment, size_t num_bytes) override {
297 return wrapped_->AllocateRaw(alignment, num_bytes);
300 void* AllocateRaw(size_t alignment, size_t num_bytes,
302 return wrapped_->AllocateRaw(alignmen
[additional matches in this file omitted]
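
allocator.h is the interface every other match implements or wraps: a pure-virtual AllocateRaw/DeallocateRaw pair (lines 87 and 101), a defaulted overload taking AllocationAttributes (line 93), and a typed Allocate<T> helper layered on top (line 124). A reduced sketch of that shape; the alignment constant's value and the omitted overflow checks and constructor handling are simplifications, not the real definitions:

    #include <cstddef>

    class Allocator {
     public:
      // Illustrative value; the real constant lives in allocator.h.
      static constexpr size_t kAllocatorAlignment = 64;

      virtual ~Allocator() {}
      virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0;
      // REQUIRES: ptr was previously returned by a call to AllocateRaw.
      virtual void DeallocateRaw(void* ptr) = 0;

      // Typed convenience layered over AllocateRaw, as on line 124 above.
      // The real version also guards against sizeof(T) * num_elements
      // overflowing and runs constructors for non-trivial T.
      template <typename T>
      T* Allocate(size_t num_elements) {
        void* p = AllocateRaw(kAllocatorAlignment, sizeof(T) * num_elements);
        return static_cast<T*>(p);
      }
    };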
allocator_test.cc:90 void* raw = a->AllocateRaw(1, s);
181 void* p = a->AllocateRaw(1, bytes);
tracking_allocator.cc:32 void* TrackingAllocator::AllocateRaw( [function in class tensorflow::TrackingAllocator]
35 void* ptr = allocator_->AllocateRaw(alignment, num_bytes, allocation_attr);
36 // If memory is exhausted AllocateRaw returns nullptr, and we should
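
The tracking matches tie the pattern together: TrackingAllocator forwards to a wrapped allocator, keeps "the number of calls to AllocateRaw that have not yet been matched" by a deallocation (tracking_allocator.h line 100), and, because an exhausted allocator returns nullptr (tracking_allocator.cc line 36), must not count failed allocations. A pared-down sketch of that bookkeeping, with invented names and no thread safety:

    #include <cstddef>

    class Allocator {
     public:
      virtual ~Allocator() {}
      virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0;
      virtual void DeallocateRaw(void* ptr) = 0;
    };

    class TrackingAllocatorSketch : public Allocator {
     public:
      explicit TrackingAllocatorSketch(Allocator* wrapped) : wrapped_(wrapped) {}

      void* AllocateRaw(size_t alignment, size_t num_bytes) override {
        void* ptr = wrapped_->AllocateRaw(alignment, num_bytes);
        // On exhaustion the wrapped allocator returns nullptr; a failed
        // allocation must not be counted as outstanding.
        if (ptr != nullptr) ++outstanding_;
        return ptr;
      }
      void DeallocateRaw(void* ptr) override {
        if (ptr == nullptr) return;
        --outstanding_;
        wrapped_->DeallocateRaw(ptr);
      }
      int outstanding() const { return outstanding_; }

     private:
      Allocator* wrapped_;    // not owned
      int outstanding_ = 0;   // AllocateRaws not yet matched by a deallocation
    };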
allocator.cc:79 void* AllocateRaw(size_t alignment, size_t num_bytes) override {
/external/tensorflow/tensorflow/compiler/jit/
xla_device_context.h:38 void* AllocateRaw(size_t alignment, size_t num_bytes) override;
/external/tensorflow/tensorflow/core/common_runtime/sycl/
sycl_allocator.h:35 void* AllocateRaw(size_t alignment, size_t num_bytes) override;
sycl_allocator.cc:38 void* SYCLAllocator::AllocateRaw(size_t alignment, size_t num_bytes) { [function in class tensorflow::SYCLAllocator]
/external/tensorflow/tensorflow/core/kernels/
immutable_constant_op.cc:37 void* AllocateRaw(size_t alignment, size_t num_bytes) override {
/external/tensorflow/tensorflow/compiler/tf2xla/
xla_compilation_device.cc:42 void* AllocateRaw(size_t alignment, size_t num_bytes) override {

Completed in 232 milliseconds
