// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ui/gfx/size.h"

namespace content {

CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}

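// A channel-level error (e.g. the GPU process going away) is treated the same
// way as an explicit context loss with an unknown reason.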
void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}

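// Echo acks arrive in the same order the Echo() requests were sent, so the
// oldest queued callback is the one being acknowledged.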
void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

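// Runs and removes the callback that was registered under |id| by
// SignalSyncPoint() or SignalQuery().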
void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

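// Allocates and maps the shared-state memory that mirrors the service-side
// command buffer state, shares it with the GPU process, and asks the service
// to initialize the command buffer for this route.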
bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
      route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.map_image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

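// Forwards new commands to the service. Flushes with an unchanged put offset
// are dropped, and any accumulated latency info is handed off with the IPC.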
void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_,
                                          latency_info_));
  latency_info_.clear();
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  for (size_t i = 0; i < latency_info.size(); i++)
    latency_info_.push_back(latency_info[i]);
}

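// Blocks the calling thread with a synchronous IPC until the last read token
// falls in [start, end] or the context is lost.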
void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

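// Allocates a shared-memory transfer buffer, registers it with the service
// under a newly reserved id, and returns a client-side gpu::Buffer wrapping
// it. On failure, *id is left at -1 and NULL is returned.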
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return NULL;

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return NULL;

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return NULL;

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

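// Allocates a GpuMemoryBuffer through the channel factory, registers it with
// the service, and caches it client-side keyed by the newly reserved id.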
gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      channel_->factory()->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage));
  if (!buffer)
    return NULL;

  DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(buffer->GetHandle()));

  int32 new_id = channel_->ReserveGpuMemoryBufferId();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterGpuMemoryBuffer IPC below.
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(buffer->GetHandle());

  if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
                route_id_,
                new_id,
                handle,
                width,
                height,
                internalformat))) {
    return NULL;
  }

  *id = new_id;
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
  return gpu_memory_buffers_.add(new_id, buffer.Pass()).first->second;
}

void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_UnregisterGpuMemoryBuffer(route_id_, id));

  // Remove the gpu memory buffer from the client side cache.
  DCHECK(gpu_memory_buffers_.find(id) != gpu_memory_buffers_.end());
  gpu_memory_buffers_.take(id);
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return;
  }

  if (!Send(new GpuCommandBufferMsg_Echo(
           route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
    return;
  }

  echo_tasks_.push(callback);
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

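// Inserts a sync point; the boolean argument to the IPC appears to control
// whether the service retires it immediately. InsertFutureSyncPoint() below
// passes false and relies on an explicit RetireSyncPoint() call instead.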
uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
  return sync_point;
}

uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
  return sync_point;
}

void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
}

void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

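// Like SignalSyncPoint() above, but runs |callback| once the service has
// passed the given query. A minimal caller-side sketch (|proxy| and
// OnQueryDone are hypothetical caller names, not part of this file):
//
//   proxy->SignalQuery(query_id, base::Bind(&OnQueryDone));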
void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

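// Sends |msg| over the GPU channel, taking ownership of it. A failed send
// marks the context as lost; once the channel is gone the message is simply
// deleted.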
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

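// Accepts a state snapshot from the service, ignoring stale ones. The
// generation comparison below tolerates wraparound of the 32-bit counter.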
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

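// Refreshes last_state_ from the shared-memory state, but only while no error
// has been recorded, so a lost-context state is never overwritten.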
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

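// Returns the mapped shared memory viewed as the gpu::CommandBufferSharedState
// structure that the service side also updates.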
gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

}  // namespace content