// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "content/common/gpu/devtools_gpu_instrumentation.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  virtual void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  virtual ~GpuCommandBufferMemoryTracker() {
  }
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
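  // Note: this uses function-local static state and is not thread-safe; it
  // assumes all callers run on the same thread (the GPU main thread).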
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time a fence is polled, delay some extra time to allow other
// stubs to process some work; otherwise the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

class DevToolsChannelData : public base::debug::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::debug::ConvertableToTraceFormat> CreateForChannel(
      GpuChannel* channel);

  virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE {
    std::string tmp;
    base::JSONWriter::Write(value_.get(), &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  virtual ~DevToolsChannelData() {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_refptr<base::debug::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        attrib_parser.bind_generates_resource);
  }

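  // A driver bug workaround may also force the use of virtualized GL
  // contexts, even when the client did not request them.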
  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  // TODO(yurys): remove devtools_gpu_instrumentation call once DevTools
  // Timeline migrates to tracing crbug.com/361045.
  devtools_gpu_instrumentation::ScopedGpuTask task(channel());
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for Echo,
  // WaitForTokenInRange, WaitForGetOffsetInRange, or RetireSyncPoint).
  if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
                        OnRegisterGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UnregisterGpuMemoryBuffer,
                        OnUnregisterGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return state.put_offset != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // We count as idle if no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
  Send(new IPC::Message(message));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

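  // Retire any sync points that are still pending so anything waiting on
  // them is unblocked.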
  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetLastState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (CommandLine::ForCurrentProcess()->HasSwitch(
      switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
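  // Tell the client that the context was lost. Mark the message as
  // unblocking so the renderer can process it even while blocked in a
  // synchronous IPC call.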
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

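// Completes any pending WaitForTokenInRange / WaitForGetOffsetInRange
// request, replying as soon as the command buffer state enters the requested
// range or an error is set.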
void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
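  // flush_count is expected to increase monotonically (modulo 2^32). The
  // unsigned subtraction below stays small for in-order flushes even across
  // wraparound; a difference of 2^27 or more is treated as out-of-order.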
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should never happen; the
    // NOTREACHED is here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(pre_state.put_offset);
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // The decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // The encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

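// Sync points must be retired in the order they were added to this stub;
// the DCHECK below enforces the FIFO ordering.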
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
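  // Deschedule this stub until the sync point is retired; OnSyncPointRetired
  // will reschedule it.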
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

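// Registers a callback that replies with SignalSyncPointAck once the given
// sync point has been retired.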
void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
          base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                     this->AsWeakPtr(),
                     id));
        return;
      }
    }
  }
  // Something went wrong, run the callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
    int32 id,
    gfx::GpuMemoryBufferHandle handle,
    uint32 width,
    uint32 height,
    uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
#if defined(OS_ANDROID)
  // Verify that the renderer is not trying to use a surface texture it
  // doesn't own.
  if (handle.type == gfx::SURFACE_TEXTURE_BUFFER &&
      handle.surface_texture_id.secondary_id != channel()->client_id()) {
    LOG(ERROR) << "Illegal surface texture ID for renderer.";
    return;
  }
#endif

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  GpuChannelManager* manager = channel_->gpu_channel_manager();
  scoped_refptr<gfx::GLImage> image =
      manager->gpu_memory_buffer_factory()->CreateImageForGpuMemoryBuffer(
          handle,
          gfx::Size(width, height),
          internalformat,
          channel()->client_id());
  if (!image.get())
    return;

  // Android-specific workaround.
  if (context_group_->feature_info()->workarounds().release_image_after_use)
    image->SetReleaseAfterUse();

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnUnregisterGpuMemoryBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnUnregisterGpuMemoryBuffer");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

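// Reports the total GPU memory recorded at initialization time; a stored
// value of 0 means the query failed then, so this call returns false.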
bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

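// Forwards the new allocation to the client, but only when it differs from
// the allocation most recently sent.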
void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

}  // namespace content