1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "content/common/gpu/client/command_buffer_proxy_impl.h"
6
7#include "base/callback.h"
8#include "base/debug/trace_event.h"
9#include "base/logging.h"
10#include "base/memory/shared_memory.h"
11#include "base/stl_util.h"
12#include "content/common/child_process_messages.h"
13#include "content/common/gpu/client/gpu_channel_host.h"
14#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
15#include "content/common/gpu/gpu_messages.h"
16#include "content/common/view_messages.h"
17#include "gpu/command_buffer/common/cmd_buffer_common.h"
18#include "gpu/command_buffer/common/command_buffer_shared.h"
19#include "gpu/command_buffer/common/gpu_memory_allocation.h"
20#include "ui/gfx/size.h"
21
22namespace content {
23
// Constructs a proxy for the command buffer identified by |route_id| on
// |channel|. |channel| is not owned; it is nulled out in OnDestroyed() once
// the channel is lost so no further messages are sent.
CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),  // -1 guarantees the first Flush() is not elided.
      next_signal_id_(0) {
}
33
34CommandBufferProxyImpl::~CommandBufferProxyImpl() {
35  FOR_EACH_OBSERVER(DeletionObserver,
36                    deletion_observers_,
37                    OnWillDeleteImpl());
38
39  // Delete all the locally cached shared memory objects, closing the handle
40  // in this process.
41  for (TransferBufferMap::iterator it = transfer_buffers_.begin();
42       it != transfer_buffers_.end();
43       ++it) {
44    delete it->second.shared_memory;
45    it->second.shared_memory = NULL;
46  }
47}
48
// Dispatches incoming GPU command buffer IPC messages to the matching
// handler. Returns true if the message was handled; an unhandled message
// is a routing bug, hence the DCHECK.
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}
65
// A channel error is indistinguishable from a destroyed context from the
// client's perspective; treat it as a context loss with an unknown reason.
void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}
69
// Marks the context as lost with |reason| and notifies the error callback
// once. The channel pointer is cleared FIRST so that nothing invoked from
// the callback can send further messages on a dead channel.
void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}
85
// Runs the oldest pending Echo() callback. The closure is copied and popped
// BEFORE Run() so that a callback which re-enters Echo() (or OnEchoAck)
// sees a consistent queue.
void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}
92
93void CommandBufferProxyImpl::OnConsoleMessage(
94    const GPUCommandBufferConsoleMessage& message) {
95  if (!console_message_callback_.is_null()) {
96    console_message_callback_.Run(message.message, message.id);
97  }
98}
99
// Registers (or clears, if |callback| is null) the memory-allocation-changed
// callback and tells the GPU process whether a client callback exists, so it
// only sends allocation updates when someone is listening.
void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}
109
// Registers |observer| to be notified (OnWillDeleteImpl) from the destructor.
void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}
113
// Unregisters a previously added deletion observer.
void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}
118
119void CommandBufferProxyImpl::OnSetMemoryAllocation(
120    const gpu::MemoryAllocation& allocation) {
121  if (!memory_allocation_changed_callback_.is_null())
122    memory_allocation_changed_callback_.Run(allocation);
123}
124
125void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
126  SignalTaskMap::iterator it = signal_tasks_.find(id);
127  DCHECK(it != signal_tasks_.end());
128  base::Closure callback = it->second;
129  signal_tasks_.erase(it);
130  callback.Run();
131}
132
// Sets the closure run (at most once) when the context is lost; see
// OnDestroyed().
void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}
137
// Allocates and maps the shared-state page, shares it with the GPU process,
// and performs the synchronous Initialize IPC. Returns false on any failure.
bool CommandBufferProxyImpl::Initialize() {
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  // Synchronous send; |result| reports whether the service-side command
  // buffer initialized successfully.
  bool result;
  if (!Send(new GpuCommandBufferMsg_Initialize(
      route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  // The proxy always supports map-image, regardless of what the service
  // reported in |capabilities_|.
  capabilities_.map_image = true;

  return true;
}
173
// Synchronously fetches the current command buffer state from the GPU
// process, then folds in any newer state published via shared memory.
gpu::CommandBuffer::State CommandBufferProxyImpl::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
      OnUpdateState(state);
  }

  TryUpdateState();
  return last_state_;
}
185
// Returns the most recently observed state without any IPC or refresh.
gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}
189
// Returns the latest token, refreshing cheaply from shared memory first
// (no IPC round-trip).
int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}
194
// Asynchronously notifies the GPU process of a new put offset. No-ops if the
// context is lost or the put offset has not moved since the last flush.
void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  // |flush_count_| is pre-incremented so the service can detect out-of-order
  // or dropped flushes.
  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}
213
// Forwards input latency metadata to the GPU process for this command buffer.
void CommandBufferProxyImpl::SetLatencyInfo(
    const ui::LatencyInfo& latency_info) {
  if (last_state_.error != gpu::error::kNoError)
    return;
  Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
}
220
// Flushes |put_offset|, then if the get offset has not advanced past
// |last_known_get| (i.e. the GPU appears to have made no progress), does a
// synchronous fast state fetch so the caller does not spin.
gpu::CommandBuffer::State CommandBufferProxyImpl::FlushSync(
    int32 put_offset,
    int32 last_known_get) {
  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::FlushSync", "put_offset",
               put_offset);
  Flush(put_offset);
  TryUpdateState();
  if (last_known_get == last_state_.get_offset) {
    // Send will flag state with lost context if IPC fails.
    if (last_state_.error == gpu::error::kNoError) {
      gpu::CommandBuffer::State state;
      if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_,
                                                    &state)))
        OnUpdateState(state);
    }
    // Pick up anything newer published via shared memory after the fetch.
    TryUpdateState();
  }

  return last_state_;
}
241
// Switches the ring buffer to the transfer buffer |shm_id| and resets the
// cached put offset so the next Flush() is always sent.
void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}
249
// Service-side-only operation; the proxy must never be asked to do this.
void CommandBufferProxyImpl::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}
254
// Allocates a shared-memory transfer buffer of |size| bytes, registers it
// with the GPU process, and caches it locally. On success *|id| receives the
// new buffer id; on failure *|id| stays -1 and an empty Buffer is returned.
gpu::Buffer CommandBufferProxyImpl::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  int32 new_id = channel_->ReserveTransferBufferId();
  DCHECK(transfer_buffers_.find(new_id) == transfer_buffers_.end());

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return gpu::Buffer();

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return gpu::Buffer();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return gpu::Buffer();

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return gpu::Buffer();
  }

  // Registration succeeded: publish the id and take ownership of the local
  // mapping in the client-side cache.
  *id = new_id;
  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[new_id] = buffer;

  return buffer;
}
298
299void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
300  if (last_state_.error != gpu::error::kNoError)
301    return;
302
303  // Remove the transfer buffer from the client side cache.
304  TransferBufferMap::iterator it = transfer_buffers_.find(id);
305  if (it != transfer_buffers_.end()) {
306    delete it->second.shared_memory;
307    transfer_buffers_.erase(it);
308  }
309
310  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
311}
312
// Returns the transfer buffer |id|, serving from the client-side cache when
// possible; otherwise fetches the handle from the GPU process, maps it, and
// caches it. Returns an empty Buffer on error.
gpu::Buffer CommandBufferProxyImpl::GetTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  // Check local cache to see if there is already a client side shared memory
  // object for this id.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    return it->second;
  }

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  base::SharedMemoryHandle handle = base::SharedMemoryHandle();
  uint32 size;
  if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
                                                      id,
                                                      &handle,
                                                      &size))) {
    return gpu::Buffer();
  }

  // Cache the transfer buffer shared memory object client side.
  // |false| = read/write (not read-only) access to the region.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(handle, false));

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(size))
      return gpu::Buffer();
  }

  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[id] = buffer;

  return buffer;
}
353
// Service-side-only operation; the proxy must never be asked to do this.
void CommandBufferProxyImpl::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}
358
// Service-side-only operation; the proxy must never be asked to do this.
void CommandBufferProxyImpl::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}
364
// Service-side-only operation; the proxy must never be asked to do this.
void CommandBufferProxyImpl::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  // Not implemented in proxy.
  NOTREACHED();
}
370
// Returns the capabilities reported by the service during Initialize()
// (plus the proxy's own adjustments).
gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}
374
375gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
376    size_t width,
377    size_t height,
378    unsigned internalformat,
379    int32* id) {
380  *id = -1;
381
382  if (last_state_.error != gpu::error::kNoError)
383    return NULL;
384
385  int32 new_id = channel_->ReserveGpuMemoryBufferId();
386  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
387
388  scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer(
389      channel_->factory()->AllocateGpuMemoryBuffer(width,
390                                                   height,
391                                                   internalformat));
392  if (!gpu_memory_buffer)
393    return NULL;
394
395  DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(
396             gpu_memory_buffer->GetHandle()));
397
398  // This handle is owned by the GPU process and must be passed to it or it
399  // will leak. In otherwords, do not early out on error between here and the
400  // sending of the RegisterGpuMemoryBuffer IPC below.
401  gfx::GpuMemoryBufferHandle handle =
402      channel_->ShareGpuMemoryBufferToGpuProcess(
403          gpu_memory_buffer->GetHandle());
404
405  if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
406                route_id_,
407                new_id,
408                handle,
409                width,
410                height,
411                internalformat))) {
412    return NULL;
413  }
414
415  *id = new_id;
416  gpu_memory_buffers_[new_id] = gpu_memory_buffer.release();
417  return gpu_memory_buffers_[new_id];
418}
419
420void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
421  if (last_state_.error != gpu::error::kNoError)
422    return;
423
424  // Remove the gpu memory buffer from the client side cache.
425  GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
426  if (it != gpu_memory_buffers_.end()) {
427    delete it->second;
428    gpu_memory_buffers_.erase(it);
429  }
430
431  Send(new GpuCommandBufferMsg_DestroyGpuMemoryBuffer(route_id_, id));
432}
433
// Returns the IPC route id this proxy was constructed with.
int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}
437
438void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
439  if (last_state_.error != gpu::error::kNoError) {
440    return;
441  }
442
443  if (!Send(new GpuCommandBufferMsg_Echo(
444           route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
445    return;
446  }
447
448  echo_tasks_.push(callback);
449}
450
// Synchronously inserts a sync point in the GPU command stream. Returns 0
// if the context is lost or the IPC fails.
uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, &sync_point));
  return sync_point;
}
459
460void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
461                                             const base::Closure& callback) {
462  if (last_state_.error != gpu::error::kNoError)
463    return;
464
465  uint32 signal_id = next_signal_id_++;
466  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
467                                                    sync_point,
468                                                    signal_id))) {
469    return;
470  }
471
472  signal_tasks_.insert(std::make_pair(signal_id, callback));
473}
474
// Asks the GPU process to ack once |query| completes; |callback| then runs
// via OnSignalSyncPointAck (signal ids are shared with SignalSyncPoint).
void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s) and even if
  // they could do that, all they would do is to prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}
497
// Informs the GPU process of the surface's visibility (used for memory
// management decisions on the service side).
void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}
504
// Reports the client's managed-memory statistics to the GPU process.
void CommandBufferProxyImpl::SendManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SendClientManagedMemoryStats(route_id_,
                                                            stats));
}
513
// Generates |num| mailbox names via the channel. Returns false if the
// context is lost or generation fails.
bool CommandBufferProxyImpl::GenerateMailboxNames(
    unsigned num,
    std::vector<gpu::Mailbox>* names) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return channel_->GenerateMailboxNames(num, names);
}
522
// Asks the GPU process to expose the front buffer through |mailbox|.
// Returns false if the context is lost or the IPC could not be sent.
bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}
529
530scoped_ptr<media::VideoDecodeAccelerator>
531CommandBufferProxyImpl::CreateVideoDecoder(
532    media::VideoCodecProfile profile,
533    media::VideoDecodeAccelerator::Client* client) {
534  int decoder_route_id;
535  scoped_ptr<media::VideoDecodeAccelerator> vda;
536  if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, profile,
537                                                       &decoder_route_id))) {
538    LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed";
539    return vda.Pass();
540  }
541
542  if (decoder_route_id < 0) {
543    DLOG(ERROR) << "Failed to Initialize GPU decoder on profile: " << profile;
544    return vda.Pass();
545  }
546
547  GpuVideoDecodeAcceleratorHost* decoder_host =
548      new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client,
549                                        this);
550  vda.reset(decoder_host);
551  return vda.Pass();
552}
553
// Returns the most recently observed error without any IPC or refresh.
gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}
557
// Sends |msg| over the channel, taking ownership of it in all cases (per
// the IPC::Sender contract). On send failure the context is flagged as
// lost; if the channel is already gone the message is simply deleted.
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}
579
// Accepts |state| only if its generation is not older than the cached one,
// using unsigned-difference comparison so the generation counter can wrap.
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}
587
// Sets the callback invoked for GPU-process console messages; see
// OnConsoleMessage.
void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}
592
// Cheap state refresh: reads the state the service published into shared
// memory (no IPC). Skipped once the context is lost so last_state_ keeps
// the loss information.
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}
597
598}  // namespace content
599