// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/hash.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_manager_android.h"
#endif

namespace content {
namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  virtual void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  virtual ~GpuCommandBufferMemoryTracker() {
  }
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

}  // namespace
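
// A stub created with a share group joins that group's ContextGroup;
// otherwise it creates a fresh group with its own memory tracker.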
GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::ImageManager* image_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::string& allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      allowed_extensions_(allowed_extensions),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);
  if (share_group) {
    context_group_ = share_group->context_group_;
  } else {
    gpu::StreamTextureManager* stream_texture_manager = NULL;
#if defined(OS_ANDROID)
    stream_texture_manager = channel_->stream_texture_manager();
#endif
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        image_manager,
        new GpuCommandBufferMemoryTracker(channel),
        stream_texture_manager,
        true);
  }
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  FastSetActiveURL(active_url_, active_url_hash_);

  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // Echo, GetStateFast, RetireSyncPoint, or SetLatencyInfo).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_GetStateFast::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
    if (!MakeCurrent())
      return false;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
                                    OnGetStateFast)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                    OnGetTransferBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DiscardBackbuffer,
                        OnDiscardBackbuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer,
                        OnEnsureBackbuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats,
                        OnReceivedClientManagedMemoryStats)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  // Ensure that any delayed work that was created will be handled.
  ScheduleDelayedWork(kHandleMoreWorkPeriodMs);

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}
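
// Runs on a delayed task posted by ScheduleDelayedWork. Polls the unschedule
// fences and performs idle work once the channel is idle, or once it has been
// non-idle for longer than kMaxTimeSinceIdleMs.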
void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return state.put_offset != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
  Send(new IPC::Message(message));
}
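
// Makes the decoder's GL context current. On failure the context is marked
// lost and a parse error is set so the client finds out.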
bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}
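
// Tears down the stub. The scheduler holds raw pointers to the decoder and
// the command buffer, so it is destroyed first; destruction observers are
// notified before the decoder and command buffer themselves go away.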
void GpuCommandBufferStub::Destroy() {
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, false);
  Send(reply_message);
}
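
// Builds the stub's machinery in order: command buffer service, decoder,
// scheduler, surface, GL context, decoder initialization, and the various
// callbacks. Any failure along the way unwinds through OnInitializeFailed.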
void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  if (!command_buffer_->Initialize()) {
    DLOG(ERROR) << "CommandBufferService failed to initialize.";
    OnInitializeFailed(reply_message);
    return;
  }

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      DLOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if ((CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableVirtualGLContexts) || use_virtualized_gl_context_) &&
      channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            allowed_extensions_.c_str(),
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (CommandLine::ForCurrentProcess()->HasSwitch(
      switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  if (!command_buffer_->SetSharedStateBuffer(shared_state_shm.Pass())) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }

  GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, true);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnSetLatencyInfo(
    const ui::LatencyInfo& latency_info) {
  if (!latency_info_callback_.is_null())
    latency_info_callback_.Run(latency_info);
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  if (!decoder_->ProduceFrontBuffer(mailbox))
    LOG(ERROR) << "Failed to produce front buffer.";
}

void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetState();
    CheckContextLost();
    GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
  } else {
    DLOG(ERROR) << "no command_buffer.";
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
  Send(reply_message);
}

void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
                                        uint32 flush_count) {
  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
               "put_offset", put_offset);
  DCHECK(command_buffer_.get());
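  // The flush count is a wrapping counter, so compare with unsigned
  // subtraction: an in-order flush yields a small difference, while an
  // out-of-order (stale) flush yields a huge one.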
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out of order. This should not happen, but the
    // check is here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(pre_state.put_offset);
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
  base::SharedMemory shared_memory(transfer_buffer, false);

  if (command_buffer_)
    command_buffer_->RegisterTransferBuffer(id, &shared_memory, size);
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnGetTransferBuffer(
    int32 id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
  if (command_buffer_) {
    base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
    uint32 size = 0;

    gpu::Buffer buffer = command_buffer_->GetTransferBuffer(id);
    if (buffer.shared_memory) {
#if defined(OS_WIN)
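      // Duplicate the handle into the renderer process. This goes through the
      // sandbox broker since the GPU process may not be able to duplicate
      // handles directly.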
      transfer_buffer = NULL;
      BrokerDuplicateHandle(buffer.shared_memory->handle(),
          channel_->renderer_pid(), &transfer_buffer, FILE_MAP_READ |
          FILE_MAP_WRITE, 0);
      DCHECK(transfer_buffer != NULL);
#else
      buffer.shared_memory->ShareToProcess(channel_->renderer_pid(),
                                           &transfer_buffer);
#endif
      size = buffer.size;
    }

    GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
                                                            transfer_buffer,
                                                            size);
  } else {
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() {
  if (!CheckContextLost())
    command_buffer_->UpdateState();
}

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  int decoder_route_id = channel_->GenerateRouteID();
  GpuVideoDecodeAccelerator* decoder =
      new GpuVideoDecodeAccelerator(decoder_route_id, this);
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::OnDiscardBackbuffer() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDiscardBackbuffer");
  if (!surface_.get())
    return;
  if (surface_->DeferDraws()) {
    DCHECK(!IsScheduled());
    channel_->RequeueMessage();
  } else {
    if (!surface_->SetBackbufferAllocation(false))
      channel_->DestroySoon();
  }
}

void GpuCommandBufferStub::OnEnsureBackbuffer() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEnsureBackbuffer");
  if (!surface_.get())
    return;
  if (surface_->DeferDraws()) {
    DCHECK(!IsScheduled());
    channel_->RequeueMessage();
  } else {
    if (!surface_->SetBackbufferAllocation(true))
      channel_->DestroySoon();
  }
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}
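
// Sync points are inserted and retired in order, so the point being retired
// must be the oldest outstanding one.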
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
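
// Deschedules the stub until the sync point is retired. Returns whether the
// stub is still scheduled afterwards, which is the case when the sync point
// had already been retired and the callback ran synchronously.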
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
          base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                     this->AsWeakPtr(),
                     id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
    const GpuManagedMemoryStats& stats) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetManagedMemoryStats(stats);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}
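
// Forwards a new memory allocation to the client and applies it to the
// surface, but only the parts that changed since the last allocation sent.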
void GpuCommandBufferStub::SetMemoryAllocation(
    const GpuMemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.renderer_allocation.Equals(
          last_memory_allocation_.renderer_allocation)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation.renderer_allocation));
  }

  if (!last_memory_allocation_valid_ ||
      !allocation.browser_allocation.Equals(
          last_memory_allocation_.browser_allocation)) {
    // This can be called outside of OnMessageReceived, so the context needs
    // to be made current before calling methods on the surface.
    if (surface_.get() && MakeCurrent())
      surface_->SetFrontbufferAllocation(
          allocation.browser_allocation.suggest_have_frontbuffer);
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}
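
// Returns whether the context is lost. When the loss was reported by the GL
// robustness extension (a real reset rather than a synthetic loss) and either
// the platform loses all contexts together or this context is virtualized,
// the other contexts are lost as well.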
bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

}  // namespace content