cmd_buffer_helper.h revision 23730a6e56a168d1879203e4b3819bb36e3d8f1f
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// This file contains the command buffer helper class.
6
7#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
8#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
9
10#include <string.h>
11#include <time.h>
12
13#include "gpu/command_buffer/common/cmd_buffer_common.h"
14#include "gpu/command_buffer/common/command_buffer.h"
15#include "gpu/command_buffer/common/constants.h"
16#include "gpu/gpu_export.h"
17
18namespace gpu {
19
#if !defined(OS_ANDROID)
// Periodic flushing is enabled on non-Android platforms; see the comment in
// CommandBufferHelper::GetSpace for why it is disabled on Android.
#define CMD_HELPER_PERIODIC_FLUSH_CHECK
// Only test whether a periodic flush is due once every this many issued
// commands, to keep the per-command overhead low.
const int kCommandsPerFlushCheck = 100;
// Minimum delay between periodic flushes: 1/300 (5 * 60 Hz). Presumably in
// seconds — the units are consumed in the .cc; TODO confirm.
const float kPeriodicFlushDelay = 1.0f / (5.0f * 60.0f);
#endif

// Auto-flush thresholds, expressed as divisors of the ring buffer size.
const int kAutoFlushSmall = 16;  // 1/16 of the buffer
const int kAutoFlushBig = 2;     // 1/2 of the buffer
28
29// Command buffer helper class. This class simplifies ring buffer management:
30// it will allocate the buffer, give it to the buffer interface, and let the
31// user add commands to it, while taking care of the synchronization (put and
32// get). It also provides a way to ensure commands have been executed, through
33// the token mechanism:
34//
35// helper.AddCommand(...);
36// helper.AddCommand(...);
37// int32 token = helper.InsertToken();
38// helper.AddCommand(...);
39// helper.AddCommand(...);
40// [...]
41//
42// helper.WaitForToken(token);  // this doesn't return until the first two
43//                              // commands have been executed.
class GPU_EXPORT CommandBufferHelper {
 public:
  // The CommandBuffer is not owned by the helper; it must outlive it.
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  virtual ~CommandBufferHelper();

  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
  bool Initialize(int32 ring_buffer_size);

  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until a given number of available entries are available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //     the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a value
  // different from previously inserted tokens, or ensures that previously
  // inserted tokens with that value have already passed through the command
  // stream.
  // Returns:
  //   the value of the new token or -1 if the command buffer reader has
  //   shutdown.
  int32 InsertToken();

  // Waits until the token of a particular value has passed through the command
  // stream (i.e. commands inserted before that token have been executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   the value of the token to wait for.
  void WaitForToken(int32 token);

  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns address of space. Returns NULL when the
  // space cannot be obtained (see the re-check below).
  CommandBufferEntry* GetSpace(int32 entries) {
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On highend machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    ++commands_issued_;
    if (flush_automatically_ &&
        (commands_issued_ % kCommandsPerFlushCheck == 0)) {
      PeriodicFlushCheck();
    }
#endif

    // Test for immediate entries.
    // Fast path: if enough contiguous entries are already known to be free,
    // skip the (potentially blocking) WaitForAvailableEntries call entirely.
    if (entries > immediate_entry_count_) {
      WaitForAvailableEntries(entries);
      // WaitForAvailableEntries may fail to make room (e.g. on error); bail
      // out rather than overrun the buffer.
      if (entries > immediate_entry_count_)
        return NULL;
    }

    DCHECK_LE(entries, immediate_entry_count_);

    // Allocate space and advance put_.
    CommandBufferEntry* space = &entries_[put_];
    put_ += entries;
    immediate_entry_count_ -= entries;

    DCHECK_LE(put_, total_entry_count_);
    return space;
  }

  // Typed version of GetSpace. Gets enough room for the given type and returns
  // a reference to it. Returns NULL when GetSpace fails. Only valid for
  // fixed-size commands (kFixed).
  template <typename T>
  T* GetCmdSpace() {
    COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
    int32 space_needed = ComputeNumEntries(sizeof(T));
    void* data = GetSpace(space_needed);
    return reinterpret_cast<T*>(data);
  }

  // Typed version of GetSpace for immediate commands. data_space is the
  // number of bytes of inline payload needed *in addition to* sizeof(T).
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    void* data = GetSpace(space_needed);
    return reinterpret_cast<T*>(data);
  }

  // Typed version of GetSpace for immediate commands. Unlike
  // GetImmediateCmdSpace, total_space is the complete size in bytes,
  // including the command structure itself.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(total_space);
    void* data = GetSpace(space_needed);
    return reinterpret_cast<T*>(data);
  }

  // Value of the last token processed by the service, as reported by the
  // command buffer.
  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  // Last-known read ("get") offset of the service, from the command buffer's
  // cached state.
  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }

  // Common Commands

  // Inserts a Noop command telling the service to skip skip_count entries.
  // The inline padding accounts for skip_count - 1 entries beyond the command
  // itself.
  void Noop(uint32 skip_count) {
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

  // Inserts a SetToken command carrying the given token value.
  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }

  // Inserts a command to set the size of the given bucket on the service side.
  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }

  // Inserts a command to fill [offset, offset + size) of the given bucket
  // from the given shared memory.
  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  // Like SetBucketData, but copies the data inline into the command buffer
  // itself instead of referencing shared memory. Silently does nothing if
  // command space could not be obtained.
  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }

  // Inserts a command to start transferring a bucket from the service: the
  // bucket's size is written to the result shared memory, and data is
  // transferred to the data shared memory.
  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  // Inserts a command to copy [offset, offset + size) of the given bucket
  // into the given shared memory.
  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  // The CommandBuffer this helper drives. Not owned.
  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  // The Buffer backing the ring buffer. Returned by value.
  Buffer get_ring_buffer() const {
    return ring_buffer_;
  }

  // See flush_generation_ below.
  uint32 flush_generation() const { return flush_generation_; }

  // Releases the ring buffer. Implemented in the .cc; presumably returns the
  // transfer buffer to the service — TODO confirm.
  void FreeRingBuffer();

  // True if a ring buffer is currently allocated (-1 is the "no buffer"
  // sentinel for ring_buffer_id_).
  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  // Whether this helper can still issue commands; cleared by ClearUsable().
  bool usable () const {
    return usable_;
  }

  // Marks the helper unusable and recalculates the immediate entry count
  // (with zero waiting entries), so subsequent GetSpace calls stop handing
  // out space.
  void ClearUsable() {
    usable_ = false;
    CalcImmediateEntries(0);
  }

 private:
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
    // Ring-buffer arithmetic: free space between put_ and the service's get
    // offset. One entry is kept unused so that put_ == get unambiguously
    // means "empty" (standard ring-buffer convention).
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }

  // Recomputes immediate_entry_count_ given the number of entries currently
  // being waited for. Implemented in the .cc.
  void CalcImmediateEntries(int waiting_count);
  // Allocates the ring buffer from the command buffer service. Implemented in
  // the .cc; returns false on failure — TODO confirm.
  bool AllocateRingBuffer();
  // Releases resources owned by the helper. Implemented in the .cc.
  void FreeResources();

  // Waits for the get offset to be in a specific range, inclusive. Returns
  // false if there was an error.
  bool WaitForGetOffsetInRange(int32 start, int32 end);

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Calls Flush if automatic flush conditions are met.
  void PeriodicFlushCheck();
#endif

  CommandBuffer* command_buffer_;  // not owned
  int32 ring_buffer_id_;           // -1 when no ring buffer is allocated
  int32 ring_buffer_size_;         // size in bytes of the ring buffer
  Buffer ring_buffer_;             // the buffer backing entries_
  CommandBufferEntry* entries_;    // start of the ring buffer entries
  int32 total_entry_count_;  // the total number of entries
  // Entries known to be immediately writable without waiting; decremented by
  // GetSpace, recomputed by CalcImmediateEntries.
  int32 immediate_entry_count_;
  int32 token_;          // last token value inserted
  int32 put_;            // client-side write offset into entries_
  int32 last_put_sent_;  // put_ value at the time of the last flush

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Count of commands issued through GetSpace, used to throttle how often the
  // periodic-flush condition is checked.
  int commands_issued_;
#endif

  bool usable_;              // false once ClearUsable() has been called
  bool context_lost_;        // see IsContextLost()
  bool flush_automatically_; // see SetAutomaticFlushes()

  // Using C runtime instead of base because this file cannot depend on base.
  clock_t last_flush_time_;

  // Incremented every time the helper flushes the command buffer.
  // Can be used to track when prior commands have been flushed.
  uint32 flush_generation_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};
316
317}  // namespace gpu
318
319#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
320