gpu_channel.cc revision 2a99a7e74a7f215066514fe81d2bfa6639d9eddd
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop_proxy.h"
#include "base/process_util.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "base/timer.h"
#include "content/common/child_process.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "crypto/hmac.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/ipc_channel_proxy.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_manager_android.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
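
// With kVsyncIntervalMs = 17 this means: an IPC must sit unprocessed for
// roughly two frames (34 ms) before we start preempting other channels, and
// a single preemption is cleared after at most one frame (17 ms).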

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO thread.
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  GpuChannelMessageFilter(const std::string& private_key,
                          base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        hmac_(crypto::HMAC::SHA256) {
    bool success = hmac_.Init(base::StringPiece(private_key));
    DCHECK(success);
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNames,
                          OnGenerateMailboxNames)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesAsync,
                          OnGenerateMailboxNamesAsync)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()

    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      NOTREACHED();
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  // Message handlers.
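  //
  // A mailbox name is GL_MAILBOX_SIZE_CHROMIUM bytes long: the first half is
  // random data and the second half is an HMAC-SHA256 signature of that data,
  // keyed with the channel's private key, so the GPU process can later
  // recognize names that it issued itself.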
  void OnGenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* result) {
    TRACE_EVENT1("gpu", "OnGenerateMailboxNames", "num", num);

    result->resize(num);

    for (unsigned i = 0; i < num; ++i) {
      char name[GL_MAILBOX_SIZE_CHROMIUM];
      base::RandBytes(name, sizeof(name) / 2);

      bool success = hmac_.Sign(
          base::StringPiece(name, sizeof(name) / 2),
          reinterpret_cast<unsigned char*>(name) + sizeof(name) / 2,
          sizeof(name) / 2);
      DCHECK(success);

      (*result)[i].SetName(reinterpret_cast<int8*>(name));
    }
  }

  void OnGenerateMailboxNamesAsync(unsigned num) {
    std::vector<gpu::Mailbox> names;
    OnGenerateMailboxNames(num, &names);
    Send(new GpuChannelMsg_GenerateMailboxNamesReply(names));
  }

  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
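
  // The transitions implemented below are:
  //   IDLE -> WAITING                 when a message is pending and we have
  //                                   a preempting flag
  //   WAITING -> CHECKING             after kPreemptWaitTimeMs
  //   CHECKING -> PREEMPTING          once the oldest pending IPC has waited
  //                                   at least kPreemptWaitTimeMs (or to
  //                                   WOULD_PREEMPT_DESCHEDULED if a stub is
  //                                   descheduled)
  //   PREEMPTING -> IDLE              once caught up, or after at most
  //                                   kMaxPreemptTimeMs
  //   PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED  as stub scheduling changes
  //   WOULD_PREEMPT_DESCHEDULED -> IDLE         once caught up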

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
  // respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  crypto::HMAC hmac_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
  disallowed_features_.multisampling =
      command_line->HasSwitch(switches::kDisableGLMultisampling);
#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerAndroid(this));
#endif
}

bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));

  filter_ = new GpuChannelMessageFilter(
      mailbox_manager_->private_key(),
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_);

  return true;
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_.get()) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  bool message_processed = true;
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
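      // For example, if the deferred queue is [GetStateFast, Flush, Draw],
      // the new GetStateFast is inserted after Flush, the first
      // non-GetStateFast message.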
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
      message_processed = false;
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
      message_processed = false;
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
    message_processed = false;
  }

  if (message_processed)
    MessageProcessed();

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_.get()) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

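// Puts the message currently being handled back at the head of the deferred
// queue so it will be retried, and rolls back the processed count that the
// IO-thread filter uses for its preemption bookkeeping.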
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

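// Called when one of this channel's stubs becomes scheduled or descheduled.
// Preemption of other channels must be suspended while any of our stubs is
// descheduled, so when the descheduled count crosses zero in either direction
// the new state is forwarded to the IO-thread message filter.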
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_, a_stub_is_descheduled));
    }
  }
}

void GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  *route_id = MSG_ROUTING_NONE;

#if defined(ENABLE_GPU)

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  *route_id = GenerateRouteID();
  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      window,
      mailbox_manager_.get(),
      image_manager_.get(),
      gfx::Size(),
      disallowed_features_,
      init_params.allowed_extensions,
      init_params.attribs,
      init_params.gpu_preference,
      *route_id,
      surface_id,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
#endif  // ENABLE_GPU
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image)
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}

void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::DestroySoon() {
  MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}

int GpuChannel::GenerateRouteID() {
  static int last_id = 0;
  return ++last_id;
}

void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

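// Lazily creates the flag this channel sets to preempt other channels' work.
// The IO-thread filter owns the preemption state machine, so the flag and the
// current stub scheduling state are handed off to it on the IO message loop.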
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

GpuChannel::~GpuChannel() {
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
#if defined(OS_ANDROID)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterStreamTextureProxy,
                        OnRegisterStreamTextureProxy)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_EstablishStreamTexture,
                        OnEstablishStreamTexture)
#endif
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CollectRenderingStatsForSurface,
                        OnCollectRenderingStatsForSurface)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

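// Handles one deferred message per task, rescheduling itself while messages
// remain. As an exception, an Echo message that immediately follows for a
// scheduled stub is handled in the same task, so the ACK that trails a
// SwapBuffers is not delayed behind other channels.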
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(message.get());
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  *route_id = GenerateRouteID();

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      image_manager_.get(),
      size,
      disallowed_features_,
      init_params.allowed_extensions,
      init_params.attribs,
      init_params.gpu_preference,
      *route_id,
      0, watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", *route_id);
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  if (router_.ResolveRoute(route_id)) {
    GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
    bool need_reschedule = (stub && !stub->IsScheduled());
    router_.RemoveRoute(route_id);
    stubs_.Remove(route_id);
    // In case the renderer is currently blocked waiting for a sync reply from
    // the stub, we need to make sure to reschedule the GpuChannel here.
    if (need_reschedule) {
      // This stub won't get a chance to reschedule, so update the count
      // now.
      StubSchedulingChanged(true);
    }
  }
}

#if defined(OS_ANDROID)
void GpuChannel::OnRegisterStreamTextureProxy(
    int32 stream_id, const gfx::Size& initial_size, int32* route_id) {
  // Note that route_id is only used for notifications sent out from here.
  // StreamTextureManager owns all texture objects and for incoming messages
  // it finds the correct object based on stream_id.
  *route_id = GenerateRouteID();
  stream_texture_manager_->RegisterStreamTextureProxy(
      stream_id, initial_size, *route_id);
}

void GpuChannel::OnEstablishStreamTexture(
    int32 stream_id, int32 primary_id, int32 secondary_id) {
  stream_texture_manager_->EstablishStreamTexture(
      stream_id, primary_id, secondary_id);
}
#endif

void GpuChannel::OnCollectRenderingStatsForSurface(
    int32 surface_id, GpuRenderingStats* stats) {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    int texture_upload_count =
        it.GetCurrentValue()->decoder()->GetTextureUploadCount();
    base::TimeDelta total_texture_upload_time =
        it.GetCurrentValue()->decoder()->GetTotalTextureUploadTime();
    base::TimeDelta total_processing_commands_time =
        it.GetCurrentValue()->decoder()->GetTotalProcessingCommandsTime();

    stats->global_texture_upload_count += texture_upload_count;
    stats->global_total_texture_upload_time += total_texture_upload_time;
    stats->global_total_processing_commands_time +=
        total_processing_commands_time;
    if (it.GetCurrentValue()->surface_id() == surface_id) {
      stats->texture_upload_count += texture_upload_count;
      stats->total_texture_upload_time += total_texture_upload_time;
      stats->total_processing_commands_time += total_processing_commands_time;
    }
  }
}

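// Called after each deferred message has been fully handled. The running count
// is forwarded to the IO-thread filter so it can drop caught-up entries from
// its pending queue and update the preemption state.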
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_, messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

}  // namespace content