command_buffer_proxy_impl.cc revision 2a99a7e74a7f215066514fe81d2bfa6639d9eddd
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/process_util.h"
#include "base/shared_memory.h"
#include "base/stl_util.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/plugin_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "ui/gfx/size.h"

namespace content {

CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  // Delete all the locally cached shared memory objects, closing the handle
  // in this process.
  for (TransferBufferMap::iterator it = transfer_buffers_.begin();
       it != transfer_buffers_.end();
       ++it) {
    delete it->second.shared_memory;
    it->second.shared_memory = NULL;
  }
  for (Decoders::iterator it = video_decoder_hosts_.begin();
      it != video_decoder_hosts_.end(); ++it) {
    if (it->second)
      it->second->OnChannelError();
  }
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const base::Callback<void(const GpuMemoryAllocationForRenderer&)>&
        callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const GpuMemoryAllocationForRenderer& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

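// Allocates and maps the shared memory block that mirrors the command buffer
// state, then hands a duplicated handle to the service with the Initialize
// IPC. Returns false if allocation, mapping, or the IPC fails.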
bool CommandBufferProxyImpl::Initialize() {
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_.get())
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_.get());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result;
  if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, handle, &result))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  return true;
}

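// Asks the service for its current state via GpuCommandBufferMsg_GetState and
// folds the reply into last_state_; GetLastState() below returns the cached
// copy without any IPC.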
gpu::CommandBuffer::State CommandBufferProxyImpl::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
      OnUpdateState(state);
  }

  TryUpdateState();
  return last_state_;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

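// Forwards a new put offset to the service with an asynchronous flush. Calls
// with an unchanged put offset are dropped, and each flush that is sent is
// tagged with an incrementing flush_count_.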
void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}

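// Flushes like Flush() above and, when the get offset still equals
// last_known_get, additionally requests fresh state from the service with
// GetStateFast before returning the latest cached state.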
gpu::CommandBuffer::State CommandBufferProxyImpl::FlushSync(
    int32 put_offset,
    int32 last_known_get) {
  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::FlushSync", "put_offset",
               put_offset);
  Flush(put_offset);
  TryUpdateState();
  if (last_known_get == last_state_.get_offset) {
    // Send will flag state with lost context if IPC fails.
    if (last_state_.error == gpu::error::kNoError) {
      gpu::CommandBuffer::State state;
      if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_,
                                                    &state)))
        OnUpdateState(state);
    }
    TryUpdateState();
  }

  return last_state_;
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

void CommandBufferProxyImpl::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}

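// Allocates shared memory of the requested size, registers it with the
// service under an id reserved from the channel, and caches the mapped buffer
// client side. On failure *id stays -1 and an empty gpu::Buffer is returned.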
gpu::Buffer CommandBufferProxyImpl::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  int32 new_id = channel_->ReserveTransferBufferId();
  DCHECK(transfer_buffers_.find(new_id) == transfer_buffers_.end());

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory.get())
    return gpu::Buffer();

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return gpu::Buffer();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory.get());
  if (!base::SharedMemory::IsHandleValid(handle))
    return gpu::Buffer();

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return gpu::Buffer();
  }

  *id = new_id;
  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[new_id] = buffer;

  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the transfer buffer from the client side cache.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    delete it->second.shared_memory;
    transfer_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

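// Returns the transfer buffer for |id|, preferring the client side cache and
// otherwise fetching the handle from the service and mapping it on demand.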
gpu::Buffer CommandBufferProxyImpl::GetTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  // Check local cache to see if there is already a client side shared memory
  // object for this id.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    return it->second;
  }

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  base::SharedMemoryHandle handle = base::SharedMemoryHandle();
  uint32 size;
  if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
                                                      id,
                                                      &handle,
                                                      &size))) {
    return gpu::Buffer();
  }

  // Cache the transfer buffer shared memory object client side.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(handle, false));

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(size))
      return gpu::Buffer();
  }

  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[id] = buffer;

  return buffer;
}

void CommandBufferProxyImpl::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}

void CommandBufferProxyImpl::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}

void CommandBufferProxyImpl::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  // Not implemented in proxy.
  NOTREACHED();
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

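// Queues |callback| and asks the service to echo an EchoAck back on this
// route; the callback is run from OnEchoAck() when the ack arrives.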
bool CommandBufferProxyImpl::Echo(const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return false;
  }

  if (!Send(new GpuCommandBufferMsg_Echo(route_id_,
                    GpuCommandBufferMsg_EchoAck(route_id_)))) {
    return false;
  }

  echo_tasks_.push(callback);

  return true;
}

bool CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::DiscardBackbuffer() {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_DiscardBackbuffer(route_id_));
}

bool CommandBufferProxyImpl::EnsureBackbuffer() {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_EnsureBackbuffer(route_id_));
}

uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, &sync_point));
  return sync_point;
}

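// Registers |callback| under a fresh signal id and asks the service to ack it
// once |sync_point| has been reached; OnSignalSyncPointAck() then looks the
// id up and runs the callback.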
bool CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return false;
  }

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return false;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));

  return true;
}

bool CommandBufferProxyImpl::GenerateMailboxNames(
    unsigned num,
    std::vector<gpu::Mailbox>* names) {
  return channel_->GenerateMailboxNames(num, names);
}

bool CommandBufferProxyImpl::SetParent(
    CommandBufferProxy* parent_command_buffer,
    uint32 parent_texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  bool result;
  if (parent_command_buffer) {
    if (!Send(new GpuCommandBufferMsg_SetParent(
        route_id_,
        parent_command_buffer->GetRouteID(),
        parent_texture_id,
        &result))) {
      return false;
    }
  } else {
    if (!Send(new GpuCommandBufferMsg_SetParent(
        route_id_,
        MSG_ROUTING_NONE,
        0,
        &result))) {
      return false;
    }
  }

  return result;
}

GpuVideoDecodeAcceleratorHost*
CommandBufferProxyImpl::CreateVideoDecoder(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  int decoder_route_id;
  if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, profile,
                                                       &decoder_route_id))) {
    LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed";
    return NULL;
  }

  if (decoder_route_id < 0) {
    DLOG(ERROR) << "Failed to initialize GPU decoder on profile: " << profile;
    return NULL;
  }

  GpuVideoDecodeAcceleratorHost* decoder_host =
      new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client);
  bool inserted = video_decoder_hosts_.insert(std::make_pair(
      decoder_route_id, base::AsWeakPtr(decoder_host))).second;
  DCHECK(inserted);

  channel_->AddRoute(decoder_route_id, base::AsWeakPtr(decoder_host));

  return decoder_host;
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

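// Sends |msg| over the channel if one is still attached. A failed send marks
// the context lost; if the channel is already gone, the message is deleted
// here to honor the IPC::Sender ownership contract.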
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

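// Refreshes last_state_ from the shared state block without any IPC, but only
// while the context is not already in an error state.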
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

void CommandBufferProxyImpl::SendManagedMemoryStats(
    const GpuManagedMemoryStats& stats) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SendClientManagedMemoryStats(route_id_,
                                                            stats));
}

}  // namespace content