// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the FencedAllocator class.

#include "gpu/command_buffer/client/fenced_allocator.h"

#include <algorithm>

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {
namespace {

// Allocation alignment, must be a power of two.
const unsigned int kAllocAlignment = 16;

// Round down to the largest multiple of kAllocAlignment no greater than |size|.
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAllocAlignment - 1);
}

// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
unsigned int RoundUp(unsigned int size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}
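
// Worked example: with kAllocAlignment == 16, RoundDown(31) == 16 and
// RoundUp(31) == 32; exact multiples such as 32 pass through both unchanged.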

}  // namespace

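// Out-of-line definition for the class constant: under pre-C++17 rules a
// static const integral member initialized in-class still needs one
// definition at namespace scope if it is odr-used; MSVC has historically
// accepted code without it, hence the guard.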
#ifndef _MSC_VER
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
#endif

FencedAllocator::FencedAllocator(unsigned int size,
                                 CommandBufferHelper* helper,
                                 const base::Closure& poll_callback)
    : helper_(helper),
      poll_callback_(poll_callback),
      bytes_in_use_(0) {
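  // The heap starts out as a single FREE block spanning the largest aligned
  // region that fits in |size|.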
  Block block = { FREE, 0, RoundDown(size), kUnusedToken };
  blocks_.push_back(block);
}

FencedAllocator::~FencedAllocator() {
  // Wait on and free any blocks still pending tokens. WaitForTokenAndFreeBlock
  // collapses neighbors and returns the index to continue from.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
  }

  DCHECK_EQ(blocks_.size(), 1u);
  DCHECK_EQ(blocks_[0].state, FREE);
}

// Looks for a non-allocated block that is big enough. Searches the FREE
// blocks first (for direct usage), first-fit, then the FREE_PENDING_TOKEN
// blocks, waiting on their tokens. The current implementation isn't smart
// about optimizing what to wait for; it simply scans the blocks in order
// (first-fit as well).
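//
// Illustrative (hypothetical) call sequence: a caller allocates with Alloc(),
// issues commands that reference the memory, then releases it with
// FreePendingToken(offset, helper->InsertToken()) so the space is reclaimed
// once the reader has consumed the token.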
FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
  // A size of 0 is not allowed because it would be inconsistent to only
  // sometimes have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
  if (size == 0) {
    return kInvalidOffset;
  }

  // Round up the allocation size to ensure alignment.
  size = RoundUp(size);

  // Try first to allocate in a free block.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block& block = blocks_[i];
    if (block.state == FREE && block.size >= size) {
      return AllocInBlock(i, size);
    }
  }

  // No free block is available. Look for blocks pending tokens, and wait for
  // them to become re-usable.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state != FREE_PENDING_TOKEN)
      continue;
    i = WaitForTokenAndFreeBlock(i);
    if (blocks_[i].size >= size)
      return AllocInBlock(i, size);
  }
  return kInvalidOffset;
}

// Looks for the corresponding block, marks it FREE, and collapses it with its
// neighbors if necessary.
void FencedAllocator::Free(FencedAllocator::Offset offset) {
  BlockIndex index = GetBlockByOffset(offset);
  DCHECK_NE(blocks_[index].state, FREE);
  Block& block = blocks_[index];

  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;

  block.state = FREE;
  CollapseFreeBlock(index);
}

// Looks for the corresponding block and marks it FREE_PENDING_TOKEN. The
// block is only reclaimed and collapsed once the token has passed (see
// FreeUnused and WaitForTokenAndFreeBlock).
void FencedAllocator::FreePendingToken(
    FencedAllocator::Offset offset, int32 token) {
  BlockIndex index = GetBlockByOffset(offset);
  Block& block = blocks_[index];
  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;
  block.state = FREE_PENDING_TOKEN;
  block.token = token;
}

// Gets the size of the largest block marked as FREE, after first reclaiming
// any pending blocks whose token has passed.
unsigned int FencedAllocator::GetLargestFreeSize() {
  FreeUnused();
  unsigned int max_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block& block = blocks_[i];
    if (block.state == FREE)
      max_size = std::max(max_size, block.size);
  }
  return max_size;
}

// Gets the total size of the largest contiguous run of blocks that are either
// FREE or FREE_PENDING_TOKEN.
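// For example, with blocks [FREE 16][PENDING 32][IN_USE 16][FREE 48] this
// returns 48: the first two blocks form one 48-byte run, matched by the
// final FREE block.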
unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
  unsigned int max_size = 0;
  unsigned int current_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block& block = blocks_[i];
    if (block.state == IN_USE) {
      max_size = std::max(max_size, current_size);
      current_size = 0;
    } else {
      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
      current_size += block.size;
    }
  }
  return std::max(max_size, current_size);
}

// Makes sure that:
// - there is at least one block.
// - there are no contiguous FREE blocks (they should have been collapsed).
// - the successive offsets match the block sizes, and they are in order.
bool FencedAllocator::CheckConsistency() {
  if (blocks_.empty()) return false;
  for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
    Block& current = blocks_[i];
    Block& next = blocks_[i + 1];
    // This test is NOT subsumed by the next one: offset is unsigned, so the
    // addition below could wrap around.
    if (next.offset <= current.offset)
      return false;
    if (next.offset != current.offset + current.size)
      return false;
    if (current.state == FREE && next.state == FREE)
      return false;
  }
  return true;
}

// Returns true if any block is in use or pending a token. Because contiguous
// FREE blocks are always collapsed, the allocator is entirely free exactly
// when it holds a single FREE block.
bool FencedAllocator::InUse() {
  return blocks_.size() != 1 || blocks_[0].state != FREE;
}

// Collapses the block with the next one, then with the previous one. Provided
// the structure is consistent, those are the only blocks eligible for
// collapse.
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
  if (index + 1 < blocks_.size()) {
    Block& next = blocks_[index + 1];
    if (next.state == FREE) {
      blocks_[index].size += next.size;
      blocks_.erase(blocks_.begin() + index + 1);
    }
  }
  if (index > 0) {
    Block& prev = blocks_[index - 1];
    if (prev.state == FREE) {
      prev.size += blocks_[index].size;
      blocks_.erase(blocks_.begin() + index);
      --index;
    }
  }
  return index;
}

// Waits for the block's token, then marks the block FREE and collapses it
// with its neighbors.
FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
    BlockIndex index) {
  Block& block = blocks_[index];
  DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
  helper_->WaitForToken(block.token);
  block.state = FREE;
  return CollapseFreeBlock(index);
}

// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
  // Give any blocks whose lifetime is handled externally a chance to be
  // released.
  poll_callback_.Run();

  for (unsigned int i = 0; i < blocks_.size();) {
    Block& block = blocks_[i];
    if (block.state == FREE_PENDING_TOKEN &&
        helper_->HasTokenPassed(block.token)) {
      block.state = FREE;
      i = CollapseFreeBlock(i);
    } else {
      ++i;
    }
  }
}

// If the block is exactly the requested size, simply marks it IN_USE;
// otherwise splits it and marks the first part (of the requested size)
// IN_USE.
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                      unsigned int size) {
  Block& block = blocks_[index];
  DCHECK_GE(block.size, size);
  DCHECK_EQ(block.state, FREE);
  Offset offset = block.offset;
  bytes_in_use_ += size;
  if (block.size == size) {
    block.state = IN_USE;
    return offset;
  }
  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken };
  block.state = IN_USE;
  block.size = size;
  // Do this last: the insertion may invalidate the |block| reference.
  blocks_.insert(blocks_.begin() + index + 1, newblock);
  return offset;
}

// The blocks are in offset order, so we can do a binary search.
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
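  // A sentinel block used purely as a search key: only its offset matters for
  // the OffsetCmp comparison; the other fields are placeholders.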
  Block templ = { IN_USE, offset, 0, kUnusedToken };
  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                            templ, OffsetCmp());
  DCHECK(it != blocks_.end() && it->offset == offset);
  return it - blocks_.begin();
}

}  // namespace gpu