// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/devtools_gpu_agent.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel> gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop,
                          bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}

  virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
    DCHECK(!sender_);
    sender_ = sender;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(sender_);
    sender_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      Tuple1<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !retire.a) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_,
                     sync_point_manager_,
                     message.routing_id(),
                     retire.a,
                     sync_point));
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }

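  // Called on the IO thread when the GpuChannel reports that it has processed
  // messages up to |messages_processed|; pops the matching entries from
  // |pending_messages_| and re-evaluates the preemption state.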
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

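  // Called on the IO thread (posted from GpuChannel::GetPreemptionFlag) to
  // supply the flag used to preempt other channels and the current stub
  // scheduling state.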
  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

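  // Called on the IO thread when the channel's descheduled-stub count crosses
  // zero in either direction, so the preemption state machine can react to the
  // scheduling change.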
  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

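  // Sends |message| using the IPC sender captured in OnFilterAdded().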
  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

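  // Advances the preemption state machine described above, based on the
  // pending message queue, the time the oldest message has been waiting and
  // the stub scheduling state.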
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
       FROM_HERE,
       max_preemption_time_,
       this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      } else {
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread; it is only
  // passed through, so the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const base::CommandLine* command_line =
      base::CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ = IPC::SyncChannel::Create(channel_id_,
                                      IPC::Channel::MODE_SERVER,
                                      this,
                                      io_message_loop,
                                      false,
                                      shutdown_event);

  filter_ =
      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
                                  gpu_channel_manager_->sync_point_manager(),
                                  base::MessageLoopProxy::current(),
                                  allow_future_sync_points_);
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

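// Puts the message currently being dispatched back at the front of the
// deferred queue and rolls back the processed-message count so that it is
// handled again on the next HandleMessage() run.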
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

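// Tracks how many stubs on this channel are descheduled and, when that count
// crosses zero in either direction, notifies the IO-thread filter (if a
// preempting flag exists) so it can update its preemption state.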
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

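// Lazily creates the flag this channel raises to preempt other channels and
// hands it to the IO-thread filter together with the current scheduling state.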
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

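// Stores the flag by which this channel can itself be preempted and propagates
// it to all existing command buffer stubs; newly created stubs pick it up in
// CreateViewCommandBuffer() and OnCreateOffscreenCommandBuffer().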
void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

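// Returns 1 if |current_message| is an AsyncFlush and the next deferred
// message is an Echo for the same route (the SwapBuffers pattern), and 0
// otherwise.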
size_t GpuChannel::MatchSwapBufferMessagesPattern(
    IPC::Message* current_message) {
  DCHECK(current_message);
  if (deferred_messages_.empty() || !current_message)
    return 0;
  // Only care about AsyncFlush message.
  if (current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
    return 0;

  size_t index = 0;
  int32 routing_id = current_message->routing_id();

  // Fetch the first message and move index to point to the second message.
  IPC::Message* first_message = deferred_messages_[index++];

  // If the current message is AsyncFlush, the expected message sequence for
  // SwapBuffers is AsyncFlush->Echo, so we only try to match the Echo message.
  if (current_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
      first_message->type() == GpuCommandBufferMsg_Echo::ID &&
      first_message->routing_id() == routing_id) {
    return 1;
  }

  // No matched message is found.
  return 0;
}

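// Dispatches deferred messages on the main thread. Returns early if the target
// stub is unscheduled or preempted, and keeps draining the queue in the same
// task when the messages match the SwapBuffers pattern.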
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  size_t matched_messages_num = 0;
  bool should_handle_swapbuffer_msgs_immediate = false;
  IPC::Message* m = NULL;
  GpuCommandBufferStub* stub = NULL;

  do {
    m = deferred_messages_.front();
    stub = stubs_.Lookup(m->routing_id());
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    if (deferred_messages_.empty())
      break;

    // Process the pending messages immediately if they match the SwapBuffers
    // pattern; GLRenderer, for example, always issues SwapBuffers through a
    // fixed IPC sequence (AsyncFlush followed by Echo).
    //
    // Handling them here instead of posting another task to the message loop
    // avoids being blocked by other channels and lets SwapBuffers execute as
    // soon as possible.
    if (!should_handle_swapbuffer_msgs_immediate) {
      // Start from the current processing message to match SwapBuffer pattern.
      matched_messages_num = MatchSwapBufferMessagesPattern(message.get());
      should_handle_swapbuffer_msgs_immediate =
          matched_messages_num > 0 && stub;
    } else {
      DCHECK_GT(matched_messages_num, 0u);
      --matched_messages_num;
      if (!stub || matched_messages_num == 0)
        should_handle_swapbuffer_msgs_immediate = false;
    }
  } while (should_handle_swapbuffer_msgs_immediate);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from the
  // stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id,
                                                bool* succeeded) {
  *succeeded = devtools_gpu_agent_->StartEventsRecording(route_id);
}

void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}

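// Bumps the processed-message count and forwards it to the IO-thread filter so
// it can drop the matching entries from its pending-message queue.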
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

}  // namespace content