// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_

#include <stdint.h>

#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/scoped_vector.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/gpu_export.h"
namespace gpu {

class CommandBufferHelper;

// Manages a shared memory segment.
class GPU_EXPORT MemoryChunk {
 public:
  MemoryChunk(int32_t shm_id,
              scoped_refptr<gpu::Buffer> shm,
              CommandBufferHelper* helper,
              const base::Closure& poll_callback);
  ~MemoryChunk();

  // Gets the size of the largest free block that is available without waiting.
  unsigned int GetLargestFreeSizeWithoutWaiting() {
    return allocator_.GetLargestFreeSize();
  }

  // Gets the size of the largest free block that can be allocated if the
  // caller can wait.
  unsigned int GetLargestFreeSizeWithWaiting() {
    return allocator_.GetLargestFreeOrPendingSize();
  }

  // Gets the size of the chunk.
  unsigned int GetSize() const {
    return static_cast<unsigned int>(shm_->size());
  }

  // The shared memory id for this chunk.
  int32_t shm_id() const {
    return shm_id_;
  }
  // Allocates a block of memory. If the buffer is out of directly available
  // memory, this function may wait until memory that was freed "pending a
  // token" can be re-used.
  //
  // Parameters:
  //   size: the size of the memory block to allocate.
  //
  // Returns:
  //   the pointer to the allocated memory block, or NULL if out of
  //   memory.
  void* Alloc(unsigned int size) {
    return allocator_.Alloc(size);
  }

  // Gets the offset to a memory block given the base memory and the address.
  // It translates NULL to FencedAllocator::kInvalidOffset.
  unsigned int GetOffset(void* pointer) {
    return allocator_.GetOffset(pointer);
  }

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer) {
    allocator_.Free(pointer);
  }

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, unsigned int token) {
    allocator_.FreePendingToken(pointer, token);
  }

  // Frees any blocks whose tokens have passed.
  void FreeUnused() {
    allocator_.FreeUnused();
  }

  // Returns true if the pointer is within the range of this chunk.
  bool IsInChunk(void* pointer) const {
    return pointer >= shm_->memory() &&
           pointer <
               reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
  }

  // Returns true if any memory in this chunk is in use.
  bool InUse() {
    return allocator_.InUse();
  }

  size_t bytes_in_use() const {
    return allocator_.bytes_in_use();
  }

 private:
  int32_t shm_id_;
  scoped_refptr<gpu::Buffer> shm_;
  FencedAllocatorWrapper allocator_;

  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};
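
// Example (a minimal usage sketch, not part of this header's API): a
// MemoryChunk hands out sub-allocations from one shared memory Buffer. The
// |shm_id|, |buffer|, |helper|, and |poll_callback| values below are assumed
// to come from the surrounding command buffer client code, and
// helper->InsertToken() is used as the token source.
//
//   gpu::MemoryChunk chunk(shm_id, buffer, helper, poll_callback);
//   void* mem = chunk.Alloc(1024);
//   if (mem) {
//     unsigned int offset = chunk.GetOffset(mem);
//     // Issue commands that reference (chunk.shm_id(), offset), then release
//     // the block once the service has consumed them.
//     chunk.FreePendingToken(mem, helper->InsertToken());
//   }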

// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager {
 public:
  enum MemoryLimit {
    kNoLimit = 0,
  };

  // |unused_memory_reclaim_limit|: When exceeded, this causes pending memory
  // to be reclaimed before allocating more memory.
  MappedMemoryManager(CommandBufferHelper* helper,
                      const base::Closure& poll_callback,
                      size_t unused_memory_reclaim_limit);

  ~MappedMemoryManager();

  unsigned int chunk_size_multiple() const {
    return chunk_size_multiple_;
  }

  void set_chunk_size_multiple(unsigned int multiple) {
    chunk_size_multiple_ = multiple;
  }

  // Allocates a block of memory.
  // Parameters:
  //   size: the size of memory to allocate.
  //   shm_id: pointer to variable to receive the shared memory id.
  //   shm_offset: pointer to variable to receive the shared memory offset.
  // Returns:
  //   the pointer to the allocated block of memory, or NULL on failure.
  void* Alloc(
      unsigned int size, int32_t* shm_id, unsigned int* shm_offset);

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer);

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, int32_t token);

  // Frees any shared memory that is not in use.
  void FreeUnused();

  // Used for testing
  size_t num_chunks() const {
    return chunks_.size();
  }

  size_t bytes_in_use() const {
    size_t bytes_in_use = 0;
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      bytes_in_use += chunk->bytes_in_use();
    }
    return bytes_in_use;
  }

  // Used for testing
  size_t allocated_memory() const {
    return allocated_memory_;
  }

 private:
  typedef ScopedVector<MemoryChunk> MemoryChunkVector;

  // Size a chunk is rounded up to.
  unsigned int chunk_size_multiple_;
  CommandBufferHelper* helper_;
  base::Closure poll_callback_;
  MemoryChunkVector chunks_;
  size_t allocated_memory_;
  size_t max_free_bytes_;

  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};
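
// Example (a minimal usage sketch, not part of this header's API):
// MappedMemoryManager hides the individual chunks and returns the
// (shm_id, shm_offset) pair a command needs. |helper|, |poll_callback|, and
// MyData below are placeholders assumed to come from the calling code.
//
//   gpu::MappedMemoryManager manager(helper, poll_callback,
//                                    gpu::MappedMemoryManager::kNoLimit);
//   int32_t shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* mem = manager.Alloc(sizeof(MyData), &shm_id, &shm_offset);
//   if (mem) {
//     // Fill |mem|, reference it via (shm_id, shm_offset) in commands, then
//     // free it once a token guarding those commands has been inserted.
//     manager.FreePendingToken(mem, helper->InsertToken());
//   }
//   manager.FreeUnused();  // Optionally reclaim chunks whose tokens passed.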

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_