// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/devtools_gpu_agent.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "crypto/random.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/ipc_channel_proxy.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_manager_android.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the age of the oldest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
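
// For reference, with a 60 Hz display (one vsync roughly every 17 ms) these
// constants work out to: wait ~34 ms (two frames) before preempting, preempt
// for at most ~17 ms (one frame), and stop early once the oldest pending IPC
// is less than ~17 ms old.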

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO thread.
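//
// The filter is installed from GpuChannel::Init() and runs on the IO thread.
// Sketch of the InsertSyncPoint round trip it implements (see
// OnMessageReceived() and InsertSyncPointOnMainThread() below):
//   1. The renderer sends the synchronous GpuCommandBufferMsg_InsertSyncPoint.
//   2. On the IO thread, the filter generates the sync point ID and replies
//      immediately, without blocking on the GPU main thread.
//   3. A task posted to the main thread associates the sync point with the
//      stub and enqueues GpuCommandBufferMsg_RetireSyncPoint, so the sync
//      point is retired in order with the channel's other messages.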
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false) {
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNames,
                          OnGenerateMailboxNames)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesAsync,
                          OnGenerateMailboxNamesAsync)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()

    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      NOTREACHED();
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

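  // Called on the IO thread (posted from GpuChannel::MessageProcessed on the
  // main thread) with the number of messages the channel has fully processed;
  // pops the matching entries so preemption can unwind once we have caught up.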
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  // Message handlers.
  void OnGenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* result) {
    TRACE_EVENT1("gpu", "OnGenerateMailboxNames", "num", num);

    result->resize(num);

    for (unsigned i = 0; i < num; ++i)
      crypto::RandBytes((*result)[i].name, sizeof((*result)[i].name));
  }

  void OnGenerateMailboxNamesAsync(unsigned num) {
    std::vector<gpu::Mailbox> names;
    OnGenerateMailboxNames(num, &names);
    Send(new GpuChannelMsg_GenerateMailboxNamesReply(names));
  }

  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

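  // Transitions between these states (see the Transition*() methods below):
  //   IDLE -> WAITING when a message becomes pending;
  //   WAITING -> CHECKING after kPreemptWaitTimeMs;
  //   CHECKING -> PREEMPTING once the oldest pending IPC has been pending
  //     for kPreemptWaitTimeMs (or -> WOULD_PREEMPT_DESCHEDULED if a stub
  //     is descheduled);
  //   PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED as stubs are (de)scheduled;
  //   back to IDLE once caught up or after max_preemption_time_ expires.
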
  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for the time at which the oldest IPC
            // will have been pending long enough to trigger preemption.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
  // respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
  disallowed_features_.multisampling =
      command_line->HasSwitch(switches::kDisableGLMultisampling);
#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerAndroid(this));
#endif
}

bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));

  filter_ = new GpuChannelMessageFilter(
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));

  return true;
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

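// Puts a copy of the message currently being dispatched back at the head of
// the deferred queue so it is retried before anything else, and rewinds the
// processed-message count accordingly.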
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

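// Tracks how many stubs are descheduled and, when the channel moves between
// "some stub descheduled" and "none descheduled", forwards the new state to
// the IO-thread filter so it can adjust preemption.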
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

void GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  *route_id = MSG_ROUTING_NONE;

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  *route_id = GenerateRouteID();
  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               *route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}

void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}

int GpuChannel::GenerateRouteID() {
  static int last_id = 0;
  return ++last_id;
}

void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

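// Lazily creates the flag that this channel raises to preempt other channels'
// work; the IO-thread filter decides when to set and clear it.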
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

GpuChannel::~GpuChannel() {
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
                        OnDestroyVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
#if defined(OS_ANDROID)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterStreamTextureProxy,
                        OnRegisterStreamTextureProxy)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_EstablishStreamTexture,
                        OnEstablishStreamTexture)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_SetStreamTextureSize,
                        OnSetStreamTextureSize)
#endif
    IPC_MESSAGE_HANDLER(
        GpuChannelMsg_CollectRenderingStatsForSurface,
        OnCollectRenderingStatsForSurface)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

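// Dispatches deferred messages in order, fast-tracking an Echo ACK that
// immediately follows (e.g. after a SwapBuffers), and re-posts itself via
// OnScheduled() while messages remain.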
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the Echo ACK that follows a SwapBuffers to be sent back as soon
    // as possible, so avoid scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  *route_id = GenerateRouteID();

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      image_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      *route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", *route_id);
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = !stub->IsScheduled();
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");

  *route_id = GenerateRouteID();
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(this, *route_id);
  router_.AddRoute(*route_id, encoder);
  video_encoders_.AddWithID(encoder, *route_id);
}

void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
  TRACE_EVENT1(
      "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
  GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
  if (!encoder)
    return;
  router_.RemoveRoute(route_id);
  video_encoders_.Remove(route_id);
}

void GpuChannel::OnDevToolsStartEventsRecording(int32* route_id) {
  devtools_gpu_agent_->StartEventsRecording(route_id);
}

void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}

#if defined(OS_ANDROID)
void GpuChannel::OnRegisterStreamTextureProxy(
    int32 stream_id, int32* route_id) {
  // Note that route_id is only used for notifications sent out from here.
  // StreamTextureManager owns all texture objects and for incoming messages
  // it finds the correct object based on stream_id.
  *route_id = GenerateRouteID();
  stream_texture_manager_->RegisterStreamTextureProxy(stream_id, *route_id);
}

void GpuChannel::OnEstablishStreamTexture(
    int32 stream_id, int32 primary_id, int32 secondary_id) {
  stream_texture_manager_->EstablishStreamTexture(
      stream_id, primary_id, secondary_id);
}

void GpuChannel::OnSetStreamTextureSize(
    int32 stream_id, const gfx::Size& size) {
  stream_texture_manager_->SetStreamTextureSize(stream_id, size);
}
#endif

void GpuChannel::OnCollectRenderingStatsForSurface(
    int32 surface_id, GpuRenderingStats* stats) {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    int texture_upload_count =
        it.GetCurrentValue()->decoder()->GetTextureUploadCount();
    base::TimeDelta total_texture_upload_time =
        it.GetCurrentValue()->decoder()->GetTotalTextureUploadTime();
    base::TimeDelta total_processing_commands_time =
        it.GetCurrentValue()->decoder()->GetTotalProcessingCommandsTime();

    stats->global_texture_upload_count += texture_upload_count;
    stats->global_total_texture_upload_time += total_texture_upload_time;
    stats->global_total_processing_commands_time +=
        total_processing_commands_time;
    if (it.GetCurrentValue()->surface_id() == surface_id) {
      stats->texture_upload_count += texture_upload_count;
      stats->total_texture_upload_time += total_texture_upload_time;
      stats->total_processing_commands_time += total_processing_commands_time;
    }
  }

  GPUVideoMemoryUsageStats usage_stats;
  gpu_channel_manager_->gpu_memory_manager()->GetVideoMemoryUsageStats(
      &usage_stats);
  stats->global_video_memory_bytes_allocated = usage_stats.bytes_allocated;
}

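// Called after each deferred message has been fully handled; forwards the
// running count to the IO-thread filter so it can retire pending entries
// and clear preemption once the channel has caught up.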
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

}  // namespace content