// cmd_buffer_helper.cc revision 23730a6e56a168d1879203e4b3819bb36e3d8f1f
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

#include "base/logging.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/trace_event.h"

namespace gpu {

// Initializes bookkeeping only. The shared ring buffer itself is allocated
// lazily by AllocateRingBuffer() (normally via Initialize()).
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),  // -1 == no transfer buffer allocated yet.
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      immediate_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
      commands_issued_(0),
#endif
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),  // Auto-flush heuristic is on by default.
      last_flush_time_(0),
      flush_generation_(0) {
}

35void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
36  flush_automatically_ = enabled;
37  CalcImmediateEntries(0);
38}
39
40bool CommandBufferHelper::IsContextLost() {
41  if (!context_lost_) {
42    context_lost_ = error::IsError(command_buffer()->GetLastError());
43  }
44  return context_lost_;
45}
46
// Computes immediate_entry_count_: how many entries can be written
// contiguously at put_ without wrapping and without overrunning get.
// |waiting_count| is a lower bound that is always kept available so that a
// caller waiting for that many entries cannot be starved by the auto-flush
// limiter.
void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // Check if usable & allocated.
  if (!usable() || !HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries.
  const int32 curr_get = get_offset();
  if (curr_get > put_) {
    // get is ahead of put: we may fill up to one entry short of get.
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    // get is at or behind put: we may fill to the end of the buffer, minus
    // one entry when get == 0 so put never catches up to get after wrapping.
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
  if (flush_automatically_) {
    // Smaller divisor (larger budget) while the service has consumed
    // everything sent so far; larger divisor while it is still catching up.
    int32 limit =
        total_entry_count_ /
        ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);

    // Entries issued since the last Flush(), accounting for wrap-around.
    int32 pending =
        (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}

// Creates the shared ring buffer, hands it to the service with
// SetGetBuffer(), and initializes the entry bookkeeping. Returns true on
// success or when a ring buffer already exists; on failure the helper is
// marked unusable via ClearUsable().
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  Buffer buffer = command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    // Allocation failed; the helper cannot issue commands from here on.
    ClearUsable();
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);

  // TODO(gman): Do we really need to call GetState here? We know get & put = 0
  // Also do we need to check state.num_entries?
  CommandBuffer::State state = command_buffer_->GetState();
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_.ptr);
  int32 num_ring_buffer_entries =
      ring_buffer_size_ / sizeof(CommandBufferEntry);
  // The service must be able to address every entry we plan to use.
  if (num_ring_buffer_entries > state.num_entries) {
    ClearUsable();
    return false;
  }

  total_entry_count_ = num_ring_buffer_entries;
  put_ = state.put_offset;
  CalcImmediateEntries(0);
  return true;
}

125void CommandBufferHelper::FreeResources() {
126  if (HaveRingBuffer()) {
127    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
128    ring_buffer_id_ = -1;
129    CalcImmediateEntries(0);
130  }
131}
132
// Releases the ring buffer. Only legal when every issued command has been
// consumed (put == get) or when the command buffer is already in an error
// state; otherwise pending commands would be silently dropped.
void CommandBufferHelper::FreeRingBuffer() {
  CHECK((put_ == get_offset()) ||
      error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}

139bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
140  ring_buffer_size_ = ring_buffer_size;
141  return AllocateRingBuffer();
142}
143
CommandBufferHelper::~CommandBufferHelper() {
  // Release the transfer buffer, if any. Unlike FreeRingBuffer(), there is no
  // put == get check here; commands not yet consumed are abandoned.
  FreeResources();
}

148bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
149  if (!usable()) {
150    return false;
151  }
152  command_buffer_->WaitForGetOffsetInRange(start, end);
153  return command_buffer_->GetLastError() == gpu::error::kNoError;
154}
155
156void CommandBufferHelper::Flush() {
157  // Wrap put_ before flush.
158  if (put_ == total_entry_count_)
159    put_ = 0;
160
161  if (usable() && last_put_sent_ != put_) {
162    last_flush_time_ = clock();
163    last_put_sent_ = put_;
164    command_buffer_->Flush(put_);
165    ++flush_generation_;
166    CalcImmediateEntries(0);
167  }
168}
169
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Issues a Flush() when more than kPeriodicFlushDelay seconds (as measured by
// clock(), i.e. process CPU time) have elapsed since the last flush.
void CommandBufferHelper::PeriodicFlushCheck() {
  clock_t current_time = clock();
  if (current_time - last_flush_time_ > kPeriodicFlushDelay * CLOCKS_PER_SEC)
    Flush();
}
#endif

// Calls Flush() and then waits until the buffer is empty. Break early if the
// error is set. Returns true once everything issued so far has been consumed
// by the service (get == put).
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  DCHECK(HaveRingBuffer());
  Flush();
  // Wait for the reader to consume everything up to put_.
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(get_offset(), put_);

  CalcImmediateEntries(0);

  return true;
}

// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare. Returns the token just issued (or the current token
// unchanged if the helper is unusable).
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // we wrapped: drain the whole buffer so every previously issued token
      // has been consumed before the counter restarts from 0.
      Finish();
      DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}

// Waits until the current token value is greater or equal to the value passed
// in argument. Returns immediately when the token has already passed, when it
// is invalid (negative), or when it is from before the last wrap (InsertToken
// Finish()es on wrap, so such tokens are guaranteed consumed already).
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_) return;  // we wrapped
  if (last_token_read() > token)
    return;
  // Make sure the SetToken command has actually been sent before waiting.
  Flush();
  command_buffer_->WaitForTokenInRange(token, token_);
}

// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding a noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the available
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, actually that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32 curr_get = get_offset();
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      Flush();
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = get_offset();
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }
    // Insert Noops to fill out the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      // A single noop can skip at most kMaxSize entries, so pad in chunks.
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush().
    Flush();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full.  Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
      // NOTE(review): put_ + count + 1 can exceed total_entry_count_ when
      // put_ is near the end of the buffer — presumably the service treats
      // the range tolerantly (or it cannot occur after the wrap above);
      // verify against the WaitForGetOffsetInRange implementation.
      if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}


}  // namespace gpu