// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the tests for the FencedAllocator class.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/aligned_memory.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

namespace gpu {

using testing::Return;
using testing::Mock;
using testing::Truly;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::_;

class BaseFencedAllocatorTest : public testing::Test {
 protected:
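  // kBufferSize is deliberately a multiple of the 16-byte allocations these
  // tests make, so the buffer can be filled exactly, and kAllocAlignment is
  // the alignment the tests expect FencedAllocator to guarantee.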
  static const unsigned int kBufferSize = 1024;
  static const int kAllocAlignment = 16;

  virtual void SetUp() {
    api_mock_.reset(new AsyncAPIMock);
    // Ignore noops in the mock - we don't want to inspect the internals of
    // the helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    {
      TransferBufferManager* manager = new TransferBufferManager();
      transfer_buffer_manager_.reset(manager);
      EXPECT_TRUE(manager->Initialize());
    }
    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

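    // Wire the command buffer straight into a GpuScheduler that dispatches to
    // the mock API. With this setup the service consumes commands only when
    // the helper actually has to wait for something (e.g. a token), not
    // eagerly on every put.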
    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

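  // Returns the last token the service side has processed, as published in
  // the command buffer's shared state.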
  int32 GetToken() {
    return command_buffer_->GetState().token;
  }

#if defined(OS_MACOSX)
  base::mac::ScopedNSAutoreleasePool autorelease_pool_;
#endif
  base::MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};

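// MSVC historically treats the in-class initializer as the definition and
// rejects this out-of-class one, while other toolchains require it whenever
// the constant is odr-used (e.g. bound to a const reference by EXPECT_EQ).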
#ifndef _MSC_VER
const unsigned int BaseFencedAllocatorTest::kBufferSize;
#endif

// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken calls are properly forwarded to the engine.
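//
// Typical client usage of FencedAllocator, which these tests exercise
// (a sketch, not taken from a real call site):
//   FencedAllocator::Offset offset = allocator.Alloc(size);
//   // ... fill the memory at |offset|, issue commands that read it ...
//   allocator.FreePendingToken(offset, helper.InsertToken());
//   // The block is recycled only once the service has passed the token.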
class FencedAllocatorTest : public BaseFencedAllocatorTest {
 protected:
  virtual void SetUp() {
    BaseFencedAllocatorTest::SetUp();
    allocator_.reset(new FencedAllocator(kBufferSize, helper_.get()));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocator> allocator_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorTest, TestBasic) {
  allocator_->CheckConsistency();
  EXPECT_FALSE(allocator_->InUse());

  const unsigned int kSize = 16;
  FencedAllocator::Offset offset = allocator_->Alloc(kSize);
  EXPECT_TRUE(allocator_->InUse());
  EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_GE(kBufferSize, offset + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Test alloc 0 fails.
TEST_F(FencedAllocatorTest, TestAllocZero) {
  FencedAllocator::Offset offset = allocator_->Alloc(0);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorTest, TestOutOfMemory) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, then reallocate with half the size.
  allocator_->Free(offsets[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  offsets[0] = allocator_->Alloc(kSize / 2);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorTest, TestFreePendingToken) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
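  // InsertToken() appends a SetToken command and returns the token's value;
  // the token has "passed" once the service has executed that command.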
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way we hooked up the helper and engine, the service won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so it should
  // process commands until the token has passed.
  offsets[0] = allocator_->Alloc(kSize);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism using FreeUnused().
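// Unlike Alloc(), which may block waiting on a token to reclaim space,
// FreeUnused() only collects blocks whose tokens have already passed, without
// blocking.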
TEST_F(FencedAllocatorTest, FreeUnused) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_TRUE(allocator_->InUse());

  // No memory should be available.
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused block.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Free two more.
  token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[1], token);
  token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[2], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Check that nothing has changed.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused blocks.
  EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
  EXPECT_TRUE(allocator_->InUse());

  // Free up the remaining allocations.
  for (unsigned int i = 3; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_FALSE(allocator_->InUse());
}

// Tests GetLargestFreeSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // implementation-dependent, but either first-fit or best-fit would ensure
  // that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this depends on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());

  offset = allocator_->Alloc(kBufferSize - 3 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());

  offset1 = allocator_->Alloc(2 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  allocator_->Free(offset);
  allocator_->Free(offset1);
  allocator_->Free(offset2);
}

// Tests GetLargestFreeOrPendingSize.
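// GetLargestFreeOrPendingSize also counts blocks that were freed pending a
// token (they can be reclaimed by waiting), whereas GetLargestFreeSize only
// counts space that is usable immediately.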
TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // implementation-dependent, but either first-fit or best-fit would ensure
  // that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this depends on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeOrPendingSize());

  // Free the last one, pending a token.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offset2, token);

  // Now all the buffers have been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
372  // .. but one is still waiting for the token.
373  EXPECT_EQ(kBufferSize - 3 * kSize,
374            allocator_->GetLargestFreeSize());
375
  // The way we hooked up the helper and engine, the service won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());
  // This allocation will need to reclaim the space freed above, so it should
  // process commands until the token has passed; the allocation itself will
  // succeed.
  offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());
  allocator_->Free(offset);

  // Everything now has been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... for real.
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
}

// Test fixture for FencedAllocatorWrapper test - Creates a
// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
// AsyncAPIInterface for its interface (calling it directly, not through the
// RPC mechanism), making sure Noops are ignored and SetToken calls are
// properly forwarded to the engine.
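//
// FencedAllocatorWrapper layers a client-side pointer API over
// FencedAllocator: it hands out addresses inside |buffer_| rather than
// offsets, but the Alloc/Free/FreePendingToken semantics are the same.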
class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
 protected:
  virtual void SetUp() {
    BaseFencedAllocatorTest::SetUp();

    // Though allocating this buffer isn't strictly necessary, it makes
    // allocations point to valid addresses, so they could be used for
    // something.
    buffer_.reset(static_cast<char*>(base::AlignedAlloc(
        kBufferSize, kAllocAlignment)));
    allocator_.reset(new FencedAllocatorWrapper(kBufferSize, helper_.get(),
                                                buffer_.get()));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocatorWrapper> allocator_;
  scoped_ptr_malloc<char, base::ScopedPtrAlignedFree> buffer_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorWrapperTest, TestBasic) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  void *pointer = allocator_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
  EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());

  char *pointer_char = allocator_->AllocTyped<char>(kSize);
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_.get(), pointer_char);
  EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
  allocator_->Free(pointer_char);
  EXPECT_TRUE(allocator_->CheckConsistency());

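  // AllocTyped<T>(n) is the typed variant: it allocates room for n elements
  // of T (n * sizeof(T) bytes) and returns a T*.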
  unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
  ASSERT_TRUE(pointer_uint);
  EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
  EXPECT_GE(buffer_.get() + kBufferSize,
            reinterpret_cast<char *>(pointer_uint + kSize));

  // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
  // directly, except from the remaining size.
  EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
            allocator_->GetLargestFreeSize());
  allocator_->Free(pointer_uint);
}

// Test alloc 0 fails.
TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
  allocator_->CheckConsistency();

  void *pointer = allocator_->Alloc(0);
  ASSERT_FALSE(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks that allocation offsets are aligned to multiples of 16 bytes.
TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
  allocator_->CheckConsistency();

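  // For a power-of-two alignment A, a pointer p is A-aligned exactly when
  // (p & (A - 1)) == 0; the checks below rely on kAllocAlignment being a
  // power of two.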
  const unsigned int kSize1 = 75;
  void *pointer1 = allocator_->Alloc(kSize1);
  ASSERT_TRUE(pointer1);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize2 = 43;
  void *pointer2 = allocator_->Alloc(kSize2);
  ASSERT_TRUE(pointer2);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer2);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer1);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, then reallocate with half the size.
  allocator_->Free(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  pointers[0] = allocator_->Alloc(kSize / 2);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(pointers[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way we hooked up the helper and engine, the service won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so it should
  // process commands until the token has passed.
  pointers[0] = allocator_->Alloc(kSize);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

}  // namespace gpu