// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "crypto/hmac.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/ipc_channel_proxy.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_manager_android.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration for which we will keep
// preempting before clearing the preemption flag.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  GpuChannelMessageFilter(const std::string& private_key,
                          base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        hmac_(crypto::HMAC::SHA256) {
    bool success = hmac_.Init(base::StringPiece(private_key));
    DCHECK(success);
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNames,
                          OnGenerateMailboxNames)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesAsync,
                          OnGenerateMailboxNamesAsync)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()

    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      NOTREACHED();
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

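  // Called on the IO thread (posted from GpuChannel::MessageProcessed on the
  // main thread) after the channel has handled a message. Pops every pending
  // entry the channel has caught up with, then re-evaluates preemption.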
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

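  // Replies generated on the IO thread are sent directly on the underlying
  // channel, without bouncing through the GpuChannel on the main thread.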
  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  // Message handlers.
  void OnGenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* result) {
    TRACE_EVENT1("gpu", "OnGenerateMailboxNames", "num", num);

    result->resize(num);

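    // Each name is half random bytes and half an HMAC over those bytes,
    // computed with the mailbox manager's private key (supplied to this
    // filter in GpuChannel::Init).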
    for (unsigned i = 0; i < num; ++i) {
      char name[GL_MAILBOX_SIZE_CHROMIUM];
      base::RandBytes(name, sizeof(name) / 2);

      bool success = hmac_.Sign(
          base::StringPiece(name, sizeof(name) / 2),
          reinterpret_cast<unsigned char*>(name) + sizeof(name) / 2,
          sizeof(name) / 2);
      DCHECK(success);

      (*result)[i].SetName(reinterpret_cast<int8*>(name));
    }
  }

  void OnGenerateMailboxNamesAsync(unsigned num) {
    std::vector<gpu::Mailbox> names;
    OnGenerateMailboxNames(num, &names);
    Send(new GpuChannelMsg_GenerateMailboxNamesReply(names));
  }

  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

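  // Central dispatch for the preemption state machine. Called whenever the
  // pending-message queue or the stubs' scheduling state changes; each case
  // either stays put or hands off to one of the TransitionTo*() helpers.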
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
  // respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  crypto::HMAC hmac_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
  disallowed_features_.multisampling =
      command_line->HasSwitch(switches::kDisableGLMultisampling);
#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerAndroid(this));
#endif
}

bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

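  // The filter takes ownership of this heap-allocated WeakPtr and deletes it
  // on the main thread (see GpuChannelMessageFilter's destructor).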
  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));

  filter_ = new GpuChannelMessageFilter(
      mailbox_manager_->private_key(),
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  return true;
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

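// Puts the message currently being handled back at the head of the deferred
// queue and rolls back the processed count, so the message is not counted
// twice when it is handled again.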
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

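// Tracks how many stubs are descheduled. When the aggregate "any stub is
// descheduled" state flips, the IO-thread filter is notified so it can adjust
// its preemption behavior accordingly.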
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

void GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  *route_id = MSG_ROUTING_NONE;

#if defined(ENABLE_GPU)

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  *route_id = GenerateRouteID();
  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.allowed_extensions,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               *route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
#endif  // ENABLE_GPU
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}

void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}

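// Route IDs come from a function-local static counter, so they are unique
// across every GpuChannel in this GPU process, not just within one channel.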
int GpuChannel::GenerateRouteID() {
  static int last_id = 0;
  return ++last_id;
}

void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

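// Returns the flag (created on first use) that the IO-thread filter raises
// while this channel's IPCs have gone unprocessed for too long; a channel
// handed this flag via SetPreemptByFlag() will preempt its stubs while the
// flag is set.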
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

GpuChannel::~GpuChannel() {
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
#if defined(OS_ANDROID)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterStreamTextureProxy,
                        OnRegisterStreamTextureProxy)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_EstablishStreamTexture,
                        OnEstablishStreamTexture)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_SetStreamTextureSize,
                        OnSetStreamTextureSize)
#endif
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CollectRenderingStatsForSurface,
                        OnCollectRenderingStatsForSurface)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

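  // Handle deferred messages in order. The loop normally runs once per
  // scheduled task; it only repeats to fast-track an Echo ACK (see the end
  // of the loop body).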
  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following a SwapBuffers to be sent as soon as
    // possible, without scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  *route_id = GenerateRouteID();

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      image_manager_.get(),
      size,
      disallowed_features_,
      init_params.allowed_extensions,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      *route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", *route_id);
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = !stub->IsScheduled();
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

#if defined(OS_ANDROID)
void GpuChannel::OnRegisterStreamTextureProxy(
    int32 stream_id, int32* route_id) {
  // Note that route_id is only used for notifications sent out from here.
  // StreamTextureManager owns all texture objects and for incoming messages
  // it finds the correct object based on stream_id.
  *route_id = GenerateRouteID();
  stream_texture_manager_->RegisterStreamTextureProxy(stream_id, *route_id);
}

void GpuChannel::OnEstablishStreamTexture(
    int32 stream_id, int32 primary_id, int32 secondary_id) {
  stream_texture_manager_->EstablishStreamTexture(
      stream_id, primary_id, secondary_id);
}

void GpuChannel::OnSetStreamTextureSize(
    int32 stream_id, const gfx::Size& size) {
  stream_texture_manager_->SetStreamTextureSize(stream_id, size);
}
#endif

void GpuChannel::OnCollectRenderingStatsForSurface(
    int32 surface_id, GpuRenderingStats* stats) {
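  // The global_* fields aggregate over every stub on this channel; the
  // per-surface fields only accumulate for stubs whose surface matches
  // |surface_id|.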
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    int texture_upload_count =
        it.GetCurrentValue()->decoder()->GetTextureUploadCount();
    base::TimeDelta total_texture_upload_time =
        it.GetCurrentValue()->decoder()->GetTotalTextureUploadTime();
    base::TimeDelta total_processing_commands_time =
        it.GetCurrentValue()->decoder()->GetTotalProcessingCommandsTime();

    stats->global_texture_upload_count += texture_upload_count;
    stats->global_total_texture_upload_time += total_texture_upload_time;
    stats->global_total_processing_commands_time +=
        total_processing_commands_time;
    if (it.GetCurrentValue()->surface_id() == surface_id) {
      stats->texture_upload_count += texture_upload_count;
      stats->total_texture_upload_time += total_texture_upload_time;
      stats->total_processing_commands_time += total_processing_commands_time;
    }
  }
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

}  // namespace content