1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "gpu/command_buffer/client/mapped_memory.h"
6
7#include <list>
8#include "base/bind.h"
9#include "base/memory/scoped_ptr.h"
10#include "base/message_loop/message_loop.h"
11#include "gpu/command_buffer/client/cmd_buffer_helper.h"
12#include "gpu/command_buffer/service/command_buffer_service.h"
13#include "gpu/command_buffer/service/gpu_scheduler.h"
14#include "gpu/command_buffer/service/mocks.h"
15#include "gpu/command_buffer/service/transfer_buffer_manager.h"
16#include "testing/gtest/include/gtest/gtest.h"
17
18#if defined(OS_MACOSX)
19#include "base/mac/scoped_nsautorelease_pool.h"
20#endif
21
22namespace gpu {
23
24using testing::Return;
25using testing::Mock;
26using testing::Truly;
27using testing::Sequence;
28using testing::DoAll;
29using testing::Invoke;
30using testing::_;
31
// Base fixture shared by the MemoryChunk and MappedMemoryManager tests.
// It wires a CommandBufferService and a GpuScheduler directly to a mock
// AsyncAPIInterface, so commands written through |helper_| are executed
// in-process (no IPC) whenever the helper has to flush or wait.
class MappedMemoryTestBase : public testing::Test {
 protected:
  // Size in bytes of the ring buffer given to |helper_|.
  static const unsigned int kBufferSize = 1024;

  virtual void SetUp() {
    api_mock_.reset(new AsyncAPIMock(true));
    // ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    // The command buffer needs a transfer-buffer manager before it can be
    // initialized; keep the raw pointer only long enough to Initialize().
    {
      TransferBufferManager* manager = new TransferBufferManager();
      transfer_buffer_manager_.reset(manager);
      EXPECT_TRUE(manager->Initialize());
    }

    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    // The scheduler executes commands against the mock API when the command
    // buffer's put offset advances; both callbacks must be hooked up before
    // the helper starts issuing commands.
    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  // Returns the last token value the service has processed.
  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

#if defined(OS_MACOSX)
  base::mac::ScopedNSAutoreleasePool autorelease_pool_;
#endif
  base::MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};
84
// Out-of-class definition so the in-class-initialized static const integer
// can be odr-used (e.g. bound to a reference inside EXPECT_EQ). MSVC emits
// a definition from the in-class initializer and rejects this one.
#ifndef _MSC_VER
const unsigned int MappedMemoryTestBase::kBufferSize;
#endif
88
namespace {
// Poll callback that deliberately does nothing; used by tests that do not
// exercise the manager's polling path.
void EmptyPoll() {}
}
93
// Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken are properly forwarded to the engine.
class MemoryChunkTest : public MappedMemoryTestBase {
 protected:
  // Arbitrary shared-memory id the chunk under test is labeled with.
  static const int32 kShmId = 123;
  virtual void SetUp() {
    MappedMemoryTestBase::SetUp();
    // Back the chunk with kBufferSize bytes of anonymous shared memory.
    scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
    shared_memory->CreateAndMapAnonymous(kBufferSize);
    buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
    chunk_.reset(new MemoryChunk(kShmId,
                                 buffer_,
                                 helper_.get(),
                                 base::Bind(&EmptyPoll)));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    MappedMemoryTestBase::TearDown();
  }

  // Start of the chunk's backing store, as bytes, for pointer arithmetic.
  uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }

  scoped_ptr<MemoryChunk> chunk_;
  scoped_refptr<gpu::Buffer> buffer_;
};
124
// Out-of-class definition so the static const integer can be odr-used;
// MSVC already emits a definition from the in-class initializer.
#ifndef _MSC_VER
const int32 MemoryChunkTest::kShmId;
#endif
128
129TEST_F(MemoryChunkTest, Basic) {
130  const unsigned int kSize = 16;
131  EXPECT_EQ(kShmId, chunk_->shm_id());
132  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
133  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
134  EXPECT_EQ(kBufferSize, chunk_->GetSize());
135  void *pointer = chunk_->Alloc(kSize);
136  ASSERT_TRUE(pointer);
137  EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer));
138  EXPECT_GE(kBufferSize,
139            static_cast<uint8*>(pointer) - buffer_memory() + kSize);
140  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
141  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
142  EXPECT_EQ(kBufferSize, chunk_->GetSize());
143
144  chunk_->Free(pointer);
145  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
146  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
147
148  uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
149  ASSERT_TRUE(pointer_char);
150  EXPECT_LE(buffer_memory(), pointer_char);
151  EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize);
152  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
153  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
154  chunk_->Free(pointer_char);
155  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
156  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
157}
158
// Fixture owning a MappedMemoryManager created with no memory limit;
// individual tests may reset |manager_| with different limits or poll
// callbacks.
class MappedMemoryManagerTest : public MappedMemoryTestBase {
 public:
  // Accessor for file-local helpers that free through the manager.
  MappedMemoryManager* manager() const {
    return manager_.get();
  }

 protected:
  virtual void SetUp() {
    MappedMemoryTestBase::SetUp();
    manager_.reset(new MappedMemoryManager(
        helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();
    // Destroy the manager before the helper/command buffer it points at.
    manager_.reset();
    MappedMemoryTestBase::TearDown();
  }

  scoped_ptr<MappedMemoryManager> manager_;
};
181
182TEST_F(MappedMemoryManagerTest, Basic) {
183  const unsigned int kSize = 1024;
184  // Check we can alloc.
185  int32 id1 = -1;
186  unsigned int offset1 = 0xFFFFFFFFU;
187  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
188  ASSERT_TRUE(mem1);
189  EXPECT_NE(-1, id1);
190  EXPECT_EQ(0u, offset1);
191  // Check if we free and realloc the same size we get the same memory
192  int32 id2 = -1;
193  unsigned int offset2 = 0xFFFFFFFFU;
194  manager_->Free(mem1);
195  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
196  EXPECT_EQ(mem1, mem2);
197  EXPECT_EQ(id1, id2);
198  EXPECT_EQ(offset1, offset2);
199  // Check if we allocate again we get different shared memory
200  int32 id3 = -1;
201  unsigned int offset3 = 0xFFFFFFFFU;
202  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
203  ASSERT_TRUE(mem3 != NULL);
204  EXPECT_NE(mem2, mem3);
205  EXPECT_NE(id2, id3);
206  EXPECT_EQ(0u, offset3);
207  // Free 3 and allocate 2 half size blocks.
208  manager_->Free(mem3);
209  int32 id4 = -1;
210  int32 id5 = -1;
211  unsigned int offset4 = 0xFFFFFFFFU;
212  unsigned int offset5 = 0xFFFFFFFFU;
213  void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
214  void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
215  ASSERT_TRUE(mem4 != NULL);
216  ASSERT_TRUE(mem5 != NULL);
217  EXPECT_EQ(id3, id4);
218  EXPECT_EQ(id4, id5);
219  EXPECT_EQ(0u, offset4);
220  EXPECT_EQ(kSize / 2u, offset5);
221  manager_->Free(mem4);
222  manager_->Free(mem2);
223  manager_->Free(mem5);
224}
225
226TEST_F(MappedMemoryManagerTest, FreePendingToken) {
227  const unsigned int kSize = 128;
228  const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
229  CHECK(kAllocCount * kSize == kBufferSize * 2);
230
231  // Allocate several buffers across multiple chunks.
232  void *pointers[kAllocCount];
233  for (unsigned int i = 0; i < kAllocCount; ++i) {
234    int32 id = -1;
235    unsigned int offset = 0xFFFFFFFFu;
236    pointers[i] = manager_->Alloc(kSize, &id, &offset);
237    EXPECT_TRUE(pointers[i]);
238    EXPECT_NE(id, -1);
239    EXPECT_NE(offset, 0xFFFFFFFFu);
240  }
241
242  // Free one successful allocation, pending fence.
243  int32 token = helper_.get()->InsertToken();
244  manager_->FreePendingToken(pointers[0], token);
245
246  // The way we hooked up the helper and engine, it won't process commands
247  // until it has to wait for something. Which means the token shouldn't have
248  // passed yet at this point.
249  EXPECT_GT(token, GetToken());
250  // Force it to read up to the token
251  helper_->Finish();
252  // Check that the token has indeed passed.
253  EXPECT_LE(token, GetToken());
254
255  // This allocation should use the spot just freed above.
256  int32 new_id = -1;
257  unsigned int new_offset = 0xFFFFFFFFu;
258  void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
259  EXPECT_TRUE(new_ptr);
260  EXPECT_EQ(new_ptr, pointers[0]);
261  EXPECT_NE(new_id, -1);
262  EXPECT_NE(new_offset, 0xFFFFFFFFu);
263
264  // Free up everything.
265  manager_->Free(new_ptr);
266  for (unsigned int i = 1; i < kAllocCount; ++i) {
267    manager_->Free(pointers[i]);
268  }
269}
270
271TEST_F(MappedMemoryManagerTest, FreeUnused) {
272  int32 id = -1;
273  unsigned int offset = 0xFFFFFFFFU;
274  void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
275  void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
276  ASSERT_TRUE(m1 != NULL);
277  ASSERT_TRUE(m2 != NULL);
278  EXPECT_EQ(2u, manager_->num_chunks());
279  manager_->FreeUnused();
280  EXPECT_EQ(2u, manager_->num_chunks());
281  manager_->Free(m2);
282  EXPECT_EQ(2u, manager_->num_chunks());
283  manager_->FreeUnused();
284  EXPECT_EQ(1u, manager_->num_chunks());
285  manager_->Free(m1);
286  EXPECT_EQ(1u, manager_->num_chunks());
287  manager_->FreeUnused();
288  EXPECT_EQ(0u, manager_->num_chunks());
289}
290
TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
  const unsigned int kSize = 1024;
  manager_->set_chunk_size_multiple(kSize *  2);
  // Check if we allocate less than the chunk size multiple we get
  // chunks rounded up to that multiple.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem1);
  ASSERT_TRUE(mem2);
  ASSERT_TRUE(mem3);
  // mem1 and mem2 share the first (rounded-up) chunk back to back; mem3
  // no longer fits there and opens a second chunk at offset 0.
  EXPECT_NE(-1, id1);
  EXPECT_EQ(id1, id2);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset1);
  EXPECT_EQ(kSize, offset2);
  EXPECT_EQ(0u, offset3);

  manager_->Free(mem1);
  manager_->Free(mem2);
  manager_->Free(mem3);
}
319
TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
  const unsigned int kChunkSize = 2048;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate one chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate another full chunk worth of memory. The first chunk is
  // entirely in use, so a new chunk must be created even though that
  // exceeds the limit.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(0u, offset2);

  // Expect two chunks to be allocated, exceeding the limit,
  // since all memory is in use.
  EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());

  manager_->Free(mem1);
  manager_->Free(mem2);
}
351
352TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
353  const unsigned int kSize = 1024;
354  // Reset the manager with a memory limit.
355  manager_.reset(new MappedMemoryManager(
356      helper_.get(), base::Bind(&EmptyPoll), kSize));
357  const unsigned int kChunkSize = 2 * 1024;
358  manager_->set_chunk_size_multiple(kChunkSize);
359
360  // Allocate half a chunk worth of memory.
361  int32 id1 = -1;
362  unsigned int offset1 = 0xFFFFFFFFU;
363  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
364  ASSERT_TRUE(mem1);
365  EXPECT_NE(-1, id1);
366  EXPECT_EQ(0u, offset1);
367
368  // Allocate half a chunk worth of memory again.
369  // The same chunk will be used.
370  int32 id2 = -1;
371  unsigned int offset2 = 0xFFFFFFFFU;
372  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
373  ASSERT_TRUE(mem2);
374  EXPECT_NE(-1, id2);
375  EXPECT_EQ(kSize, offset2);
376
377  // Free one successful allocation, pending fence.
378  int32 token = helper_.get()->InsertToken();
379  manager_->FreePendingToken(mem2, token);
380
381  // The way we hooked up the helper and engine, it won't process commands
382  // until it has to wait for something. Which means the token shouldn't have
383  // passed yet at this point.
384  EXPECT_GT(token, GetToken());
385
386  // Since we didn't call helper_.finish() the token did not pass.
387  // We won't be able to claim the free memory without waiting and
388  // as we've already met the memory limit we'll have to wait
389  // on the token.
390  int32 id3 = -1;
391  unsigned int offset3 = 0xFFFFFFFFU;
392  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
393  ASSERT_TRUE(mem3);
394  EXPECT_NE(-1, id3);
395  // It will reuse the space from the second allocation just freed.
396  EXPECT_EQ(kSize, offset3);
397
398  // Expect one chunk to be allocated
399  EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
400
401  manager_->Free(mem1);
402  manager_->Free(mem3);
403}
404
405namespace {
406void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
407  std::list<void*>::iterator it = list->begin();
408  while (it != list->end()) {
409    void* address = *it;
410    test->manager()->Free(address);
411    it = list->erase(it);
412  }
413}
414}
415
416TEST_F(MappedMemoryManagerTest, Poll) {
417  std::list<void*> unmanaged_memory_list;
418
419  const unsigned int kSize = 1024;
420  // Reset the manager with a memory limit.
421  manager_.reset(new MappedMemoryManager(
422      helper_.get(),
423      base::Bind(&Poll, this, &unmanaged_memory_list),
424      kSize));
425
426  // Allocate kSize bytes. Don't add the address to
427  // the unmanaged memory list, so that it won't be free:ed just yet.
428  int32 id1;
429  unsigned int offset1;
430  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
431  EXPECT_EQ(manager_->bytes_in_use(), kSize);
432
433  // Allocate kSize more bytes, and make sure we grew.
434  int32 id2;
435  unsigned int offset2;
436  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
437  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
438
439  // Make the unmanaged buffer be released next time FreeUnused() is called
440  // in MappedMemoryManager/FencedAllocator. This happens for example when
441  // allocating new memory.
442  unmanaged_memory_list.push_back(mem1);
443
444  // Allocate kSize more bytes. This should poll unmanaged memory, which now
445  // should free the previously allocated unmanaged memory.
446  int32 id3;
447  unsigned int offset3;
448  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
449  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
450
451  manager_->Free(mem2);
452  manager_->Free(mem3);
453  EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
454}
455
456}  // namespace gpu
457