1// Copyright 2013 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "gpu/command_buffer/service/in_process_command_buffer.h"
6
7#include <queue>
8#include <set>
9#include <utility>
10
11#include <GLES2/gl2.h>
12#ifndef GL_GLEXT_PROTOTYPES
13#define GL_GLEXT_PROTOTYPES 1
14#endif
15#include <GLES2/gl2ext.h>
16#include <GLES2/gl2extchromium.h>
17
18#include "base/bind.h"
19#include "base/bind_helpers.h"
20#include "base/lazy_instance.h"
21#include "base/logging.h"
22#include "base/memory/weak_ptr.h"
23#include "base/message_loop/message_loop_proxy.h"
24#include "base/sequence_checker.h"
25#include "base/synchronization/condition_variable.h"
26#include "base/threading/thread.h"
27#include "gpu/command_buffer/service/command_buffer_service.h"
28#include "gpu/command_buffer/service/context_group.h"
29#include "gpu/command_buffer/service/gl_context_virtual.h"
30#include "gpu/command_buffer/service/gpu_scheduler.h"
31#include "gpu/command_buffer/service/image_manager.h"
32#include "gpu/command_buffer/service/mailbox_manager.h"
33#include "gpu/command_buffer/service/memory_tracking.h"
34#include "gpu/command_buffer/service/query_manager.h"
35#include "gpu/command_buffer/service/transfer_buffer_manager.h"
36#include "ui/gfx/size.h"
37#include "ui/gl/gl_context.h"
38#include "ui/gl/gl_image.h"
39#include "ui/gl/gl_share_group.h"
40
41#if defined(OS_ANDROID)
42#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
43#include "ui/gl/android/surface_texture.h"
44#endif
45
46namespace gpu {
47
48namespace {
49
// Process-wide factory used to allocate GpuMemoryBuffers; installed via
// SetGpuMemoryBufferFactory() before any buffers are created.
static InProcessGpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;

// Runs |task|, stores its return value in |*result|, and signals |completion|
// so that a thread blocked on the event can pick up the result.
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
59
// Default Service implementation: owns a dedicated "GpuThread" on which all
// GPU work is serialized. Ref-counted so a single instance can be shared by
// every InProcessCommandBuffer created without an explicit service.
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  // Forward ref-counting to the RefCountedThreadSafe base to disambiguate
  // from any counting the Service interface declares.
  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
  // Virtualized contexts are only enabled via driver workarounds, not here.
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
  virtual scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
      OVERRIDE;

 private:
  virtual ~GpuInProcessThread();
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  // Lazily created in shader_translator_cache().
  scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};
87
// Starts the GPU thread immediately so tasks can be scheduled right away.
GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}
91
GpuInProcessThread::~GpuInProcessThread() {
  // Shut the thread down before members are destroyed.
  Stop();
}
95
// Posts |task| to the GPU thread's message loop for ordered execution.
void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}
99
// Posts |callback| with a small (5 ms) delay so idle work does not starve
// regular tasks queued via ScheduleTask().
void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}
104
105scoped_refptr<gles2::ShaderTranslatorCache>
106GpuInProcessThread::shader_translator_cache() {
107  if (!shader_translator_cache_.get())
108    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
109  return shader_translator_cache_;
110}
111
// Clients sharing the default GpuInProcessThread service. All access is
// guarded by default_thread_clients_lock_.
base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
    LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> default_thread_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
116
117class ScopedEvent {
118 public:
119  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
120  ~ScopedEvent() { event_->Signal(); }
121
122 private:
123  base::WaitableEvent* event_;
124};
125
126class SyncPointManager {
127 public:
128  SyncPointManager();
129  ~SyncPointManager();
130
131  uint32 GenerateSyncPoint();
132  void RetireSyncPoint(uint32 sync_point);
133
134  bool IsSyncPointPassed(uint32 sync_point);
135  void WaitSyncPoint(uint32 sync_point);
136
137private:
138  // This lock protects access to pending_sync_points_ and next_sync_point_ and
139  // is used with the ConditionVariable to signal when a sync point is retired.
140  base::Lock lock_;
141  std::set<uint32> pending_sync_points_;
142  uint32 next_sync_point_;
143  base::ConditionVariable cond_var_;
144};
145
// Ids start at 1, so 0 is never a generated sync point.
SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}
147
SyncPointManager::~SyncPointManager() {
  // Every generated sync point must have been retired before teardown.
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}
151
152uint32 SyncPointManager::GenerateSyncPoint() {
153  base::AutoLock lock(lock_);
154  uint32 sync_point = next_sync_point_++;
155  DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
156  pending_sync_points_.insert(sync_point);
157  return sync_point;
158}
159
160void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
161  base::AutoLock lock(lock_);
162  DCHECK(pending_sync_points_.count(sync_point));
163  pending_sync_points_.erase(sync_point);
164  cond_var_.Broadcast();
165}
166
167bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
168  base::AutoLock lock(lock_);
169  return pending_sync_points_.count(sync_point) == 0;
170}
171
// Blocks until |sync_point| is retired. The loop re-checks the predicate to
// guard against spurious condition-variable wakeups.
void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  while (pending_sync_points_.count(sync_point)) {
    cond_var_.Wait();
  }
}
178
// Process-wide sync point state shared by all InProcessCommandBuffers.
base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;
181
}  // anonymous namespace
183
// Out-of-line definitions for the Service interface's trivial special members.
InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}
187
188scoped_refptr<gles2::MailboxManager>
189InProcessCommandBuffer::Service::mailbox_manager() {
190  if (!mailbox_manager_.get())
191    mailbox_manager_ = new gles2::MailboxManager();
192  return mailbox_manager_;
193}
194
195scoped_refptr<InProcessCommandBuffer::Service>
196InProcessCommandBuffer::GetDefaultService() {
197  base::AutoLock lock(default_thread_clients_lock_.Get());
198  scoped_refptr<Service> service;
199  if (!default_thread_clients_.Get().empty()) {
200    InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
201    service = other->service_;
202    DCHECK(service.get());
203  } else {
204    service = new GpuInProcessThread;
205  }
206  return service;
207}
208
// |service| may be null; in that case this client joins (or creates) the
// shared default service and registers itself so later clients can reuse it.
InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      idle_work_pending_(false),
      last_put_offset_(-1),
      flush_event_(false, false),  // auto-reset, initially unsignaled
      service_(service.get() ? service : GetDefaultService()),
      gpu_thread_weak_ptr_factory_(this) {
  if (!service.get()) {
    base::AutoLock lock(default_thread_clients_lock_.Get());
    default_thread_clients_.Get().insert(this);
  }
}
222
InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();  // Blocks until GPU-side teardown completes.
  base::AutoLock lock(default_thread_clients_lock_.Get());
  default_thread_clients_.Get().erase(this);
}
228
// GPU-thread handler for decoder resize requests; only valid for onscreen
// surfaces.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);  // |scale_factor| is currently unused.
}
234
// Makes the decoder's GL context current on the GPU thread. On failure the
// command buffer is put into the lost-context error state and false is
// returned. Caller must hold command_buffer_lock_.
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}
246
// Processes queued commands up to the current put offset, if the GL context
// can be made current. Caller must hold command_buffer_lock_.
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}
256
// Scheduler callback for get-buffer changes; forwards the new buffer id to
// the command buffer and always reports success.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
263
// Client-thread entry point: synchronously runs InitializeOnGpuThread() and
// blocks for its result. Returns false if GPU-side setup failed.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group) {
  DCHECK(!share_group || service_.get() == share_group->service_.get());
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface.get()) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  // Block until the GPU-thread task has produced |result|.
  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  if (result) {
    capabilities_ = capabilities;
    // map_image additionally requires the process-wide buffer factory.
    capabilities_.map_image =
        capabilities_.map_image && g_gpu_memory_buffer_factory;
  }
  return result;
}
310
// GPU-thread half of Initialize(). Builds, in order: transfer buffer
// manager, CommandBufferService, share group, decoder, scheduler, GL
// surface, and GL context (virtualized when required). Any failure tears
// down everything built so far and returns false.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  // Weak pointers: these callbacks must not fire after DestroyOnGpuThread()
  // invalidates them.
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // Reuse the share group (and below, the ContextGroup) of |context_group|
  // when one was provided.
  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(service_->mailbox_manager(),
                                    NULL,
                                    service_->shader_translator_cache(),
                                    NULL,
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  // The client may already have supplied a surface in Initialize().
  if (!surface_.get()) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts() ||
      decoder_->GetContextGroup()
          ->feature_info()
          ->workarounds()
          .use_virtualized_gl_contexts) {
    // Virtual contexts in a share group all wrap one shared real context,
    // created lazily on first use.
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;  // Fall through to the failure path below.
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }
  // Report the decoder's capabilities back to the waiting client thread.
  *params.capabilities = decoder_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread,
                 base::Unretained(this)));

  return true;
}
434
// Client-thread teardown: synchronously runs DestroyOnGpuThread() and blocks
// until it has finished.
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;  // Unused; DestroyOnGpuThread() always returns true.
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}
446
// Releases all GPU-side objects. Always returns true; the bool return only
// exists to fit the RunTaskWithResult<bool> plumbing.
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  // Stop any in-flight weak-pointer-bound callbacks from firing.
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}
466
// No-op unless a client-supplied surface forced single-sequence operation
// (see Initialize()), in which case all calls must stay on that sequence.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
471
// Parse-error handler: notifies the client at most once and latches the
// context-lost flag.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();  // Ensure the client is told only once.
  }

  context_lost_ = true;
}
481
// Adopts the post-flush state when its generation is newer than the cached
// one; the unsigned-subtraction comparison stays correct across generation
// counter wraparound.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
489
// Returns the locally cached state without consulting the GPU thread.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}
494
// Refreshes the cached state and returns its token.
int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
500
// GPU-thread half of Flush(): executes commands up to |put_offset|, records
// the resulting state, and signals flush_event_ (via ScopedEvent) on every
// exit path so client-side waiters wake up.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  // The error state and the context-lost flag must agree.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    ScheduleIdleWorkOnGpuThread();
  }
}
521
// Runs one round of scheduler idle work and re-schedules itself while the
// scheduler still has work pending.
void InProcessCommandBuffer::PerformIdleWork() {
  CheckSequencedThread();
  idle_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    ScheduleIdleWorkOnGpuThread();
  }
}
531
// Requests a deferred PerformIdleWork(); coalesced while a request is
// already pending.
void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
  CheckSequencedThread();
  if (idle_work_pending_)
    return;
  idle_work_pending_ = true;
  service_->ScheduleIdleWork(
      base::Bind(&InProcessCommandBuffer::PerformIdleWork,
                 gpu_thread_weak_ptr_));
}
541
// Client-thread flush: hands |put_offset| to the GPU thread. No-op when in
// an error state or when the put offset has not advanced since last time.
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}
556
// Blocks until the token enters [start, end] or an error occurs.
// flush_event_ is signaled at the end of every GPU-thread flush.
void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}
563
// Blocks until the get offset enters [start, end] or an error occurs,
// refreshing the cached state after each flush signal.
void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}
574
// Switches the command ring buffer to |shm_id| and resets the flush
// bookkeeping to match. No-op when already in an error state.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}
590
// Allocates a transfer buffer synchronously on the calling thread (guarded
// by command_buffer_lock_); |*id| receives the buffer's handle.
scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}
597
// Frees the transfer buffer asynchronously on the GPU thread; does not wait.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}
607
// GPU-thread half of DestroyTransferBuffer().
void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}
612
// Returns the capabilities captured during Initialize().
gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
616
// Allocates a GpuMemoryBuffer, assigns it a process-local id (|*id|; left
// at -1 on failure), and asynchronously registers a GLImage for it on the
// GPU thread. The returned buffer is owned by |gpu_memory_buffers_|.
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  CheckSequencedThread();

  *id = -1;

  scoped_ptr<gfx::GpuMemoryBuffer> buffer =
      g_gpu_memory_buffer_factory->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage);
  if (!buffer.get())
    return NULL;

  // Ids are process-local, monotonically increasing, and never reused.
  static int32 next_id = 1;
  int32 new_id = next_id++;

  base::Closure task =
      base::Bind(&InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread,
                 base::Unretained(this),
                 new_id,
                 buffer->GetHandle(),
                 width,
                 height,
                 internalformat);

  QueueTask(task);

  *id = new_id;
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
  return gpu_memory_buffers_.add(new_id, buffer.Pass()).first->second;
}
651
652void InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread(
653    int32 id,
654    const gfx::GpuMemoryBufferHandle& handle,
655    size_t width,
656    size_t height,
657    unsigned internalformat) {
658  scoped_refptr<gfx::GLImage> image =
659      g_gpu_memory_buffer_factory->CreateImageForGpuMemoryBuffer(
660          handle, gfx::Size(width, height), internalformat);
661  if (!image.get())
662    return;
663
664  // For Android specific workaround.
665  gles2::ContextGroup* context_group = decoder_->GetContextGroup();
666  if (context_group->feature_info()->workarounds().release_image_after_use)
667    image->SetReleaseAfterUse();
668
669  if (decoder_) {
670    gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
671    DCHECK(image_manager);
672    image_manager->AddImage(image.get(), id);
673  }
674}
675
// Asynchronously unregisters the buffer's image on the GPU thread, then
// frees the client-side buffer object.
void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();

  base::Closure task =
      base::Bind(&InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);

  gpu_memory_buffers_.erase(id);
}
688
689void InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread(int32 id) {
690  if (decoder_) {
691    gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
692    DCHECK(image_manager);
693    image_manager->RemoveImage(id);
694  }
695}
696
// Generates a sync point and queues its retirement behind all previously
// queued GPU work.
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}
704
// Generates a sync point without queuing its retirement; the caller must
// later retire it via RetireSyncPoint().
uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
  return g_sync_point_manager.Get().GenerateSyncPoint();
}
708
// Queues retirement of |sync_point| behind previously queued GPU work.
void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
}
714
// Retires |sync_point| on the GPU thread. When the mailbox manager
// synchronizes via sync points, pending texture updates are pushed first
// (which requires a current context).
void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success)
      mailbox_manager->PushTextureUpdates(sync_point);
  }
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}
729
// Arranges for |callback| to run on the client thread once |sync_point| has
// been retired.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}
738
// Decoder callback: blocks the GPU thread until |sync_point| retires, then
// pulls texture updates. Always reports success.
bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  mailbox_manager->PullTextureUpdates(sync_point);
  return true;
}
746
// Runs |callback| if the sync point has passed; otherwise re-queues itself
// through the service's idle-work mechanism to poll again later.
void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}
760
// Arranges for |callback| to run on the client thread once query |query_id|
// completes.
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                       base::Unretained(this),
                       query_id,
                       WrapCallback(callback)));
}
769
770void InProcessCommandBuffer::SignalQueryOnGpuThread(
771    unsigned query_id,
772    const base::Closure& callback) {
773  gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
774  DCHECK(query_manager_);
775
776  gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
777  if (!query)
778    callback.Run();
779  else
780    query->AddCallback(callback);
781}
782
// Surface visibility is intentionally ignored for in-process command buffers.
void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
784
// Queues |callback| behind currently pending GPU work; it runs back on the
// calling thread via WrapCallback().
void InProcessCommandBuffer::Echo(const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}
788
// Synchronously creates a stream texture for |texture_id| on the GPU thread
// and returns its stream id (0 on non-Android platforms).
uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}
801
// GPU-thread half of CreateStreamTexture(). On non-Android platforms the
// argument is unused and 0 ("no stream") is returned.
uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  return 0;
#endif
}
811
// Returns the error from the locally cached state.
gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}
816
// Not part of the in-process API; callers must use the multi-argument
// Initialize() overload instead.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}
821
822namespace {
823
824void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
825                         const base::Closure& callback) {
826  if (!loop->BelongsToCurrentThread()) {
827    loop->PostTask(FROM_HERE, callback);
828  } else {
829    callback.Run();
830  }
831}
832
// Runs a heap-allocated closure; taking ownership ensures the closure is
// destroyed on the thread that ran it.
void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}
837
838}  // anonymous namespace
839
// Wraps |callback| so that, when invoked from any thread, it runs (and is
// destroyed) on the thread that called WrapCallback().
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
852
#if defined(OS_ANDROID)
// Returns the SurfaceTexture backing |stream_id|, created earlier via
// CreateStreamTexture().
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif
860
// static
// Installs the process-wide buffer factory consulted by
// CreateGpuMemoryBuffer(); call before creating any buffers.
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    InProcessGpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;
}
866
867}  // namespace gpu
868