gpu_command_buffer_stub.cc revision 868fa2fe829687343ffae624259930155e16dbd8
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/hash.h"
#include "base/shared_memory.h"
#include "base/time.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_manager_android.h"
#endif

namespace content {
namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  virtual void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  virtual ~GpuCommandBufferMemoryTracker() {
  }
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// When a fence is polled for the first time, delay some extra time to allow
// other stubs to process some work; otherwise the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::ImageManager* image_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::string& allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      allowed_extensions_(allowed_extensions),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);
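  // Stubs created with a share group share a single ContextGroup, and with it
  // the managers that back shared GL state (textures, buffers, and the other
  // per-group object namespaces).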
  if (share_group) {
    context_group_ = share_group->context_group_;
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        image_manager,
        new GpuCommandBufferMemoryTracker(channel),
        true);
  }
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  FastSetActiveURL(active_url_, active_url_hash_);

  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // Echo, RetireSyncPoint, or SetLatencyInfo).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
    if (!MakeCurrent())
      return false;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
  // handlers here, so that the reply can be delayed if the scheduler is
  // unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
                                    OnGetStateFast)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                    OnGetTransferBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DiscardBackbuffer,
                        OnDiscardBackbuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer,
                        OnEnsureBackbuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats,
                        OnReceivedClientManagedMemoryStats)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  // Ensure that any delayed work that was created will be handled.
  ScheduleDelayedWork(kHandleMoreWorkPeriodMs);

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since the last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

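// Commands remain to be processed when the put and get offsets differ and
// the command buffer is not in an error state.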
bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return state.put_offset != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

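// OnEcho bounces the received message straight back to the client; since the
// channel delivers messages in order, the round trip tells the client that
// everything sent before the echo has been processed.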
void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
  Send(new IPC::Message(message));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
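  // A failed MakeCurrent means the context was lost: record the reason on the
  // command buffer so the client observes kLostContext, and drop all contexts
  // if the GL implementation loses them as a group.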
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  if (gfx::GLContext::LosesAllContextsOnContextLost())
    channel_->LoseAllContexts();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer,
  // so destroy it before them.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  scoped_refptr<gfx::GLContext> context;
  if (decoder_) {
    context = decoder_->GetGLContext();
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Make sure the context is current while destroying surface_, because
  // surface_ may own GL resources that need a current context to be
  // destroyed without leaking.
  if (context.get())
    context->MakeCurrent(surface_.get());
  surface_ = NULL;
  if (context.get())
    context->ReleaseCurrent(NULL);
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, false);
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  if (!command_buffer_->Initialize()) {
    DLOG(ERROR) << "CommandBufferService failed to initialize.";
    OnInitializeFailed(reply_message);
    return;
  }

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      DLOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if ((CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableVirtualGLContexts) || use_virtualized_gl_context_) &&
      channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    } else {
      LOG(INFO) << "Created virtual GL context.";
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            allowed_extensions_.c_str(),
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (CommandLine::ForCurrentProcess()->HasSwitch(
      switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

#if defined(OS_ANDROID)
  decoder_->SetStreamTextureManager(channel_->stream_texture_manager());
#endif

  if (!command_buffer_->SetSharedStateBuffer(shared_state_shm.Pass())) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }

  GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, true);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnSetLatencyInfo(
    const ui::LatencyInfo& latency_info) {
  if (!latency_info_callback_.is_null())
    latency_info_callback_.Run(latency_info);
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  if (!decoder_->ProduceFrontBuffer(mailbox))
    LOG(ERROR) << "Failed to produce front buffer.";
}

void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetState();
    if (state.error == gpu::error::kLostContext &&
        gfx::GLContext::LosesAllContextsOnContextLost())
      channel_->LoseAllContexts();

    GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
  } else {
    DLOG(ERROR) << "No command buffer.";
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

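// Informs both the client (via an unblocking Destroyed message) and the
// browser that this context has been lost.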
void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));
}

void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  if (state.error == gpu::error::kLostContext &&
      gfx::GLContext::LosesAllContextsOnContextLost())
    channel_->LoseAllContexts();

  GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
  Send(reply_message);
}

void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
                                        uint32 flush_count) {
  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
               "put_offset", put_offset);
  DCHECK(command_buffer_.get());
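  // flush_count increases monotonically and may wrap around, so the
  // comparison uses unsigned arithmetic: a modulo-2^32 difference below
  // 0x8000000U is taken to mean the flush arrived in order.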
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out of order. This should not happen, but is
    // checked here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

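// Called when the stub becomes scheduled again: re-deliver the last put
// offset so processing resumes, and report state only if the get offset
// actually advanced.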
void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(pre_state.put_offset);
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
  base::SharedMemory shared_memory(transfer_buffer, false);

  if (command_buffer_)
    command_buffer_->RegisterTransferBuffer(id, &shared_memory, size);
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnGetTransferBuffer(
    int32 id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
  if (command_buffer_) {
    base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
    uint32 size = 0;

    gpu::Buffer buffer = command_buffer_->GetTransferBuffer(id);
    if (buffer.shared_memory) {
#if defined(OS_WIN)
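      // The renderer is sandboxed on Windows, so duplicate the shared memory
      // handle into the renderer process through the sandbox broker.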
      transfer_buffer = NULL;
      BrokerDuplicateHandle(buffer.shared_memory->handle(),
          channel_->renderer_pid(), &transfer_buffer, FILE_MAP_READ |
          FILE_MAP_WRITE, 0);
      DCHECK(transfer_buffer != NULL);
#else
      buffer.shared_memory->ShareToProcess(channel_->renderer_pid(),
                                           &transfer_buffer);
#endif
      size = buffer.size;
    }

    GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
                                                            transfer_buffer,
                                                            size);
  } else {
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() {
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  if (state.error == gpu::error::kLostContext &&
      gfx::GLContext::LosesAllContextsOnContextLost()) {
    channel_->LoseAllContexts();
  } else {
    command_buffer_->UpdateState();
  }
}

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  int decoder_route_id = channel_->GenerateRouteID();
  GpuVideoDecodeAccelerator* decoder =
      new GpuVideoDecodeAccelerator(decoder_route_id, this);
  decoder->Initialize(profile, reply_message);
  // |decoder| registers itself as a DestructionObserver of this stub and
  // deletes itself when the stub is destroyed.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::OnDiscardBackbuffer() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDiscardBackbuffer");
  if (!surface_.get())
    return;
  if (surface_->DeferDraws()) {
    DCHECK(!IsScheduled());
    channel_->RequeueMessage();
  } else {
    if (!surface_->SetBackbufferAllocation(false))
      channel_->DestroySoon();
  }
}

void GpuCommandBufferStub::OnEnsureBackbuffer() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEnsureBackbuffer");
  if (!surface_.get())
    return;
  if (surface_->DeferDraws()) {
    DCHECK(!IsScheduled());
    channel_->RequeueMessage();
  } else {
    if (!surface_->SetBackbufferAllocation(true))
      channel_->DestroySoon();
  }
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

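// Sync points retire in FIFO order per stub; the DCHECK verifies that the
// point being retired is the oldest outstanding one.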
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

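// Deschedules this stub until the sync point retires. Waits can overlap;
// sync_point_wait_count_ counts the outstanding ones so that the async trace
// event brackets the entire descheduled period.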
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

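// Registers a callback that acks the client with the client-supplied id once
// the given sync point has retired.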
void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
    const GpuManagedMemoryStats& stats) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetManagedMemoryStats(stats);
}

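// Creates or destroys the memory manager client state, depending on whether
// the client has registered a memory allocation changed callback.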
void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

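// Returns false when the total could not be queried at initialization time
// (total_gpu_memory_ remains 0 in that case).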
bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const GpuMemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.renderer_allocation.Equals(
          last_memory_allocation_.renderer_allocation)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation.renderer_allocation));
  }

  if (!last_memory_allocation_valid_ ||
      !allocation.browser_allocation.Equals(
          last_memory_allocation_.browser_allocation)) {
    // This can be called outside of OnMessageReceived, so the context needs
    // to be made current before calling methods on the surface.
    if (surface_.get() && MakeCurrent())
      surface_->SetFrontbufferAllocation(
          allocation.browser_allocation.suggest_have_frontbuffer);
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

}  // namespace content