// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the command buffer helper class.

#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_

#include <string.h>
#include <time.h>

#include "base/time/time.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"

namespace gpu {

#if !defined(OS_ANDROID)
#define CMD_HELPER_PERIODIC_FLUSH_CHECK
const int kCommandsPerFlushCheck = 100;
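// The delay below works out to base::Time::kMicrosecondsPerSecond / 300,
// i.e. periodic flushes are spaced roughly 3.3ms (about 300Hz) apart.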
const int kPeriodicFlushDelayInMicroseconds =
    base::Time::kMicrosecondsPerSecond / (5 * 60);
#endif

const int kAutoFlushSmall = 16;  // 1/16 of the buffer
const int kAutoFlushBig = 2;     // 1/2 of the buffer

// Command buffer helper class. This class simplifies ring buffer management:
// it will allocate the buffer, give it to the buffer interface, and let the
// user add commands to it, while taking care of the synchronization (put and
// get). It also provides a way to ensure commands have been executed, through
// the token mechanism:
//
// helper.AddCommand(...);
// helper.AddCommand(...);
// int32 token = helper.InsertToken();
// helper.AddCommand(...);
// helper.AddCommand(...);
// [...]
//
// helper.WaitForToken(token);  // this doesn't return until the first two
//                              // commands have been executed.
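//
// A more concrete sketch using the typed helpers declared below; the 1MB
// ring buffer size is an arbitrary example value, not a recommendation:
//
//   CommandBufferHelper helper(command_buffer);
//   if (!helper.Initialize(1024 * 1024))
//     return;  // ring buffer allocation failed
//   helper.SetBucketSize(bucket_id, size);  // queues a cmd::SetBucketSize
//   int32 token = helper.InsertToken();
//   helper.WaitForToken(token);  // flushes, then blocks until it has passed
//   helper.Finish();             // waits for *all* queued commands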
class GPU_EXPORT CommandBufferHelper {
 public:
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  virtual ~CommandBufferHelper();

  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
  bool Initialize(int32 ring_buffer_size);

  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until the given number of entries is available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //     the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a value
  // different from previously inserted tokens, or ensures that previously
  // inserted tokens with that value have already passed through the command
  // stream.
  // Returns:
  //   the value of the new token, or -1 if the command buffer reader has
  //   shut down.
  int32 InsertToken();

  // Returns true if the token has passed.
  // Parameters:
  //   token: the value of the token to check.
  bool HasTokenPassed(int32 token) const {
    if (token > token_)
      return true;  // we wrapped
    return last_token_read() >= token;
  }
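
  // Note on the wrap test above: token values increase monotonically until
  // they wrap around, so a queried token greater than the last one inserted
  // (token_) must predate the wrap, and InsertToken() guarantees (see its
  // comment above) that any such reused values have already passed.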

  // Waits until the token of a particular value has passed through the command
  // stream (i.e. commands inserted before that token have been executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   token: the value of the token to wait for.
  void WaitForToken(int32 token);

  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns the address of that space, or NULL if it
  // could not be made available.
  void* GetSpace(int32 entries) {
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    ++commands_issued_;
    if (flush_automatically_ &&
        (commands_issued_ % kCommandsPerFlushCheck == 0)) {
      PeriodicFlushCheck();
    }
#endif

    // Test for immediate entries.
    if (entries > immediate_entry_count_) {
      WaitForAvailableEntries(entries);
      if (entries > immediate_entry_count_)
        return NULL;
    }

    DCHECK_LE(entries, immediate_entry_count_);

    // Allocate space and advance put_.
    CommandBufferEntry* space = &entries_[put_];
    put_ += entries;
    immediate_entry_count_ -= entries;

    DCHECK_LE(put_, total_entry_count_);
    return space;
  }
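
  // Note: immediate_entry_count_ (recomputed by CalcImmediateEntries(),
  // declared below) is the number of entries that can be claimed right away;
  // when a request exceeds it, GetSpace() falls back to
  // WaitForAvailableEntries() and re-checks before giving up.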

  template <typename T>
  void ForceNullCheck(T* data) {
#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
    // 64-bit MSVC's alias analysis was determining that the command buffer
    // entry couldn't be NULL, so it optimized out the NULL check.
    // Dereferencing the same datatype through a volatile pointer seems to
    // prevent that from happening. http://crbug.com/361936
    if (data)
      static_cast<volatile T*>(data)->header;
#endif
  }

  // Typed version of GetSpace. Gets enough room for the given type and returns
  // a pointer to it.
  template <typename T>
  T* GetCmdSpace() {
    COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
    int32 space_needed = ComputeNumEntries(sizeof(T));
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }
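
  // Typical use of GetCmdSpace() (the same pattern as SetToken() and friends
  // below); the result may be NULL, e.g. after the context is lost, so it
  // must always be checked:
  //
  //   cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  //   if (cmd) {
  //     cmd->Init(token);
  //   }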

  // Typed version of GetSpace for immediate commands.
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }
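
  // Here data_space is the byte size of the variable-length payload that
  // follows the fixed command struct (hence sizeof(T) + data_space above);
  // GetImmediateCmdSpaceTotalSize() below takes the combined size instead.
  // SetBucketDataImmediate() below shows the payload being copied in place
  // with memcpy after Init().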

  // Typed version of GetSpace for immediate commands, taking the total size
  // (command struct plus payload) rather than just the payload size.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(total_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }

  // Common Commands
  void Noop(uint32 skip_count) {
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }

  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }

  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }

  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }

  uint32 flush_generation() const { return flush_generation_; }

  void FreeRingBuffer();

  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  bool usable() const {
    return usable_;
  }

  void ClearUsable() {
    usable_ = false;
    CalcImmediateEntries(0);
  }

 private:
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }
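
  // Worked example for the arithmetic above: with total_entry_count_ == 1024,
  // put_ == 1000 and get_offset() == 10, the writer has wrapped around, and
  // (10 - 1000 - 1 + 1024) % 1024 == 33 entries remain. The "- 1" keeps one
  // entry permanently unused so that put_ == get_offset() always means
  // "empty" rather than "full".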

  void CalcImmediateEntries(int waiting_count);
  bool AllocateRingBuffer();
  void FreeResources();

  // Waits for the get offset to be in a specific range, inclusive. Returns
  // false if there was an error.
  bool WaitForGetOffsetInRange(int32 start, int32 end);

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Calls Flush if automatic flush conditions are met.
  void PeriodicFlushCheck();
#endif

  CommandBuffer* command_buffer_;
  int32 ring_buffer_id_;
  int32 ring_buffer_size_;
  scoped_refptr<gpu::Buffer> ring_buffer_;
  CommandBufferEntry* entries_;
  int32 total_entry_count_;  // the total number of entries
  int32 immediate_entry_count_;
  int32 token_;
  int32 put_;
  int32 last_put_sent_;

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  int commands_issued_;
#endif

  bool usable_;
  bool context_lost_;
  bool flush_automatically_;

  base::TimeTicks last_flush_time_;

  // Incremented every time the helper flushes the command buffer.
  // Can be used to track when prior commands have been flushed.
  uint32 flush_generation_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_