// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/devtools_gpu_agent.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsyncs (~60 Hz). Many GL commands
// block on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace

// This filter does two things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel> gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop,
                          bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}

  virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
    DCHECK(!sender_);
    sender_ = sender;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(sender_);
    sender_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      Tuple1<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !retire.a) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_,
                     sync_point_manager_,
                     message.routing_id(),
                     retire.a,
                     sync_point));
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }

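  // Called on the IO thread after the main thread has handled IPCs through
  // |messages_processed| (see GpuChannel::MessageProcessed). Pops every
  // pending message the channel has caught up with, then re-evaluates
  // whether preemption is still needed.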
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

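  // Called once on the IO thread when GpuChannel::GetPreemptionFlag first
  // creates the flag; installs it along with the current scheduling state.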
  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

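  // Posted from the main thread whenever the channel's "any stub
  // descheduled" state flips (see GpuChannel::StubSchedulingChanged).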
  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

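  // Sends a message on the IO thread. Only valid between OnFilterAdded() and
  // OnFilterRemoved(), while |sender_| is set.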
  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

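  // Drives the preemption state machine. Called whenever one of its inputs
  // changes: a message arrives or is retired, a timer fires, or the
  // descheduled state of the stubs flips.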
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the pending IPC will have been
            // waiting long enough to justify preemption.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      } else {
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};

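// A GpuChannel is the GPU-process end of the IPC connection to a single
// client (typically one renderer process); command buffer stubs are routed
// on top of it.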
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const base::CommandLine* command_line =
      base::CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Each renderer process maps to a single channel; create the server end
  // of that channel here.
  channel_ = IPC::SyncChannel::Create(channel_id_,
                                      IPC::Channel::MODE_SERVER,
                                      this,
                                      io_message_loop,
                                      false,
                                      shutdown_event);

  filter_ =
      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
                                  gpu_channel_manager_->sync_point_manager(),
                                  base::MessageLoopProxy::current(),
                                  allow_future_sync_points_);
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

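// Called on the main thread for every message arriving from the client.
// Messages are never handled inline: they are deferred (Wait messages jump
// to the head of the queue) and drained later by HandleMessage, which runs
// as a posted task to avoid reentrancy.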
bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

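// Puts the message currently being handled back at the head of the queue so
// it is retried on the next HandleMessage pass, and rolls back the
// processed-message count accordingly.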
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

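// Tracks how many stubs are descheduled and, when the aggregate "any stub
// descheduled" state flips, forwards the new state to the IO-thread message
// filter so it can adjust preemption.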
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

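// Returns (creating on first use) the flag that the IO-thread filter sets
// while this channel's pending IPCs have waited too long. Stubs that were
// given this flag via SetPreemptByFlag() yield while it is set.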
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

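// Returns the number of deferred messages at the head of the queue that
// complete a SwapBuffers pattern for |current_message| (an AsyncFlush is
// expected to be followed by an Echo on the same route), or 0 if there is
// no match.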
size_t GpuChannel::MatchSwapBufferMessagesPattern(
    IPC::Message* current_message) {
  DCHECK(current_message);
  if (deferred_messages_.empty() || !current_message)
    return 0;
  // We only care about AsyncFlush messages.
  if (current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
    return 0;

  size_t index = 0;
  int32 routing_id = current_message->routing_id();

  // Fetch the first message and move index to point to the second message.
  IPC::Message* first_message = deferred_messages_[index++];

  // The expected message sequence for a SwapBuffers is AsyncFlush followed
  // by Echo, so try to match an Echo message on the same route.
  if (first_message->type() == GpuCommandBufferMsg_Echo::ID &&
      first_message->routing_id() == routing_id) {
    return 1;
  }

  // No matching message was found.
  return 0;
}

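// Drains deferred messages on the main thread. Normally one message is
// handled per task, but a matched SwapBuffers sequence (AsyncFlush->Echo)
// is handled within the same task so presentation is not delayed behind
// other channels.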
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  size_t matched_messages_num = 0;
  bool should_handle_swapbuffer_msgs_immediate = false;
  IPC::Message* m = NULL;
  GpuCommandBufferStub* stub = NULL;

  do {
    m = deferred_messages_.front();
    stub = stubs_.Lookup(m->routing_id());
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    if (deferred_messages_.empty())
      break;

    // Handle pending messages immediately when they match the SwapBuffers
    // pattern: GLRenderer always issues SwapBuffers as a fixed IPC sequence
    // (AsyncFlush followed by Echo).
    //
    // Handling the sequence inline, instead of posting another task to the
    // message loop, avoids being blocked by other channels and lets
    // SwapBuffers execute as soon as possible.
    if (!should_handle_swapbuffer_msgs_immediate) {
      // Start from the current processing message to match SwapBuffer pattern.
      matched_messages_num = MatchSwapBufferMessagesPattern(message.get());
      should_handle_swapbuffer_msgs_immediate =
          matched_messages_num > 0 && stub;
    } else {
      DCHECK_GT(matched_messages_num, 0u);
      --matched_messages_num;
      if (!stub || matched_messages_num == 0)
        should_handle_swapbuffer_msgs_immediate = false;
    }
  } while (should_handle_swapbuffer_msgs_immediate);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = !stub->IsScheduled();
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id,
                                                bool* succeeded) {
  *succeeded = devtools_gpu_agent_->StartEventsRecording(route_id);
}

void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}

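// Called on the main thread each time HandleMessage fully processes one
// deferred message; bumps the running count and mirrors it to the IO-thread
// filter so it can retire entries from its pending-message queue.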
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

}  // namespace content