// gpu_channel_host.cc revision 7d4cd473f85ac64c3747c96c277f9e506a0d2246
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "content/common/gpu/client/gpu_channel_host.h"
6
7#include <algorithm>
8
9#include "base/bind.h"
10#include "base/debug/trace_event.h"
11#include "base/message_loop.h"
12#include "base/message_loop/message_loop_proxy.h"
13#include "base/posix/eintr_wrapper.h"
14#include "base/threading/thread_restrictions.h"
15#include "content/common/gpu/client/command_buffer_proxy_impl.h"
16#include "content/common/gpu/gpu_messages.h"
17#include "googleurl/src/gurl.h"
18#include "gpu/command_buffer/common/mailbox.h"
19#include "ipc/ipc_sync_message_filter.h"
20
21#if defined(OS_WIN)
22#include "content/public/common/sandbox_init.h"
23#endif
24
25using base::AutoLock;
26using base::MessageLoopProxy;
27
28namespace content {
29
// GpuListenerInfo bundles a route's listener (a WeakPtr) with the message
// loop its messages should be delivered on; the fields are assigned in
// MessageFilter::AddRoute().
GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}
33
34// static
35scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
36    GpuChannelHostFactory* factory,
37    int gpu_host_id,
38    int client_id,
39    const gpu::GPUInfo& gpu_info,
40    const IPC::ChannelHandle& channel_handle) {
41  DCHECK(factory->IsMainThread());
42  scoped_refptr<GpuChannelHost> host = new GpuChannelHost(
43      factory, gpu_host_id, client_id, gpu_info);
44  host->Connect(channel_handle);
45  return host;
46}
47
// Stores the factory and identifying state; the host is not usable for IPC
// until Connect() is called.
GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
                               int gpu_host_id,
                               int client_id,
                               const gpu::GPUInfo& gpu_info)
    : factory_(factory),
      client_id_(client_id),
      gpu_host_id_(gpu_host_id),
      gpu_info_(gpu_info) {
  // Consume the first value of the transfer-buffer id sequence so that
  // ReserveTransferBufferId() never hands out the sequence's initial value.
  next_transfer_buffer_id_.GetNext();
}
58
// Establishes the IPC channel to the GPU process and installs the message
// filters. Must be paired with Create(); see the DCHECK there.
void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle) {
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_.reset(new IPC::SyncChannel(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      factory_->GetShutDownEvent()));

  // The sync filter is what Send() uses from non-main threads. It is
  // constructed with the factory's shutdown event (presumably so blocked sync
  // sends are released at shutdown — see IPC::SyncMessageFilter).
  sync_filter_ = new IPC::SyncMessageFilter(
      factory_->GetShutDownEvent());

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}
81
// Sends |msg| to the GPU process, taking ownership of it. Returns false (and
// deletes the message) when no send mechanism is available, e.g. during
// shutdown.
bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown,
  // while we are actually calling from the main thread (discard message then).
  //
  // TODO: Can we just always use sync_filter_ since we setup the channel
  //       without a main listener?
  if (factory_->IsMainThread()) {
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    return channel_->Send(message.release());
  } else if (base::MessageLoop::current()) {
    return sync_filter_->Send(message.release());
  }

  // Neither the main thread nor a thread with a message loop: drop the
  // message (freed by |message|) and report failure.
  return false;
}
108
// Creates a command buffer that renders to the onscreen surface identified by
// |surface_id|. Returns NULL on failure. The returned proxy is tracked in
// |proxies_| until DestroyCommandBuffer() is called.
CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::string& allowed_extensions,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.allowed_extensions = allowed_extensions;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  // The factory performs the creation and hands back the route id assigned
  // for the new command buffer; MSG_ROUTING_NONE signals failure.
  int32 route_id = factory_->CreateViewCommandBuffer(surface_id, init_params);
  if (route_id == MSG_ROUTING_NONE)
    return NULL;

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  // Register the proxy as the listener for its route (the registration itself
  // is posted to the IO thread; see AddRoute()).
  AddRoute(route_id, command_buffer->AsWeakPtr());

  // |proxies_| is guarded by |context_lock_|.
  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}
140
141CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
142    const gfx::Size& size,
143    CommandBufferProxyImpl* share_group,
144    const std::string& allowed_extensions,
145    const std::vector<int32>& attribs,
146    const GURL& active_url,
147    gfx::GpuPreference gpu_preference) {
148  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
149
150  GPUCreateCommandBufferConfig init_params;
151  init_params.share_group_id =
152      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
153  init_params.allowed_extensions = allowed_extensions;
154  init_params.attribs = attribs;
155  init_params.active_url = active_url;
156  init_params.gpu_preference = gpu_preference;
157  int32 route_id;
158  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
159                                                           init_params,
160                                                           &route_id))) {
161    return NULL;
162  }
163
164  if (route_id == MSG_ROUTING_NONE)
165    return NULL;
166
167  CommandBufferProxyImpl* command_buffer =
168      new CommandBufferProxyImpl(this, route_id);
169  AddRoute(route_id, command_buffer->AsWeakPtr());
170
171  AutoLock lock(context_lock_);
172  proxies_[route_id] = command_buffer;
173  return command_buffer;
174}
175
// Creates a hardware video decoder bound to the command buffer identified by
// |command_buffer_route_id|. The route id must refer to a live proxy created
// through this channel (enforced by the DCHECK).
scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id,
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  // |context_lock_| is held both for the |proxies_| lookup and while
  // delegating to the proxy.
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  CommandBufferProxyImpl* proxy = it->second;
  return proxy->CreateVideoDecoder(profile, client).Pass();
}
186
// Tears down |command_buffer|: asks the GPU process to destroy its end,
// unregisters the route and proxy bookkeeping, then deletes the proxy.
void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  // Best-effort: the result of the destroy message is ignored (e.g. the
  // channel may already be lost).
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  delete command_buffer;
}
199
200bool GpuChannelHost::CollectRenderingStatsForSurface(
201    int surface_id, GpuRenderingStats* stats) {
202  TRACE_EVENT0("gpu", "GpuChannelHost::CollectRenderingStats");
203
204  return Send(new GpuChannelMsg_CollectRenderingStatsForSurface(surface_id,
205                                                                stats));
206}
207
208void GpuChannelHost::AddRoute(
209    int route_id, base::WeakPtr<IPC::Listener> listener) {
210  DCHECK(MessageLoopProxy::current().get());
211
212  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
213  io_loop->PostTask(FROM_HERE,
214                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
215                               channel_filter_.get(), route_id, listener,
216                               MessageLoopProxy::current()));
217}
218
219void GpuChannelHost::RemoveRoute(int route_id) {
220  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
221  io_loop->PostTask(FROM_HERE,
222                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
223                               channel_filter_.get(), route_id));
224}
225
// Duplicates |source_handle| so it can be transferred to the GPU process.
// Returns the null handle if the channel is lost or duplication fails. The
// caller retains ownership of |source_handle|.
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->peer_pid(),
                             &target_handle,
                             0,
                             DUPLICATE_SAME_ACCESS)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  // POSIX: dup() the fd locally and let the IPC layer transfer it.
  // HANDLE_EINTR retries if dup() is interrupted by a signal.
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  // The second argument marks the descriptor auto-close, i.e. owned by the
  // returned FileDescriptor.
  return base::FileDescriptor(duped_handle, true);
#endif
}
251
252bool GpuChannelHost::GenerateMailboxNames(unsigned num,
253                                          std::vector<gpu::Mailbox>* names) {
254  DCHECK(names->empty());
255  TRACE_EVENT0("gpu", "GenerateMailboxName");
256  size_t generate_count = channel_filter_->GetMailboxNames(num, names);
257
258  if (names->size() < num) {
259    std::vector<gpu::Mailbox> new_names;
260    if (!Send(new GpuChannelMsg_GenerateMailboxNames(num - names->size(),
261                                                     &new_names)))
262      return false;
263    names->insert(names->end(), new_names.begin(), new_names.end());
264  }
265
266  if (generate_count > 0)
267    Send(new GpuChannelMsg_GenerateMailboxNamesAsync(generate_count));
268
269  return true;
270}
271
// Returns the next transfer buffer id from the atomic sequence. The
// constructor consumed the sequence's first value, so that value is never
// handed out here.
int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}
275
GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread. If the last reference was
  // released elsewhere, hand the raw pointer to the main loop for deferred
  // deletion; otherwise normal member destruction takes care of it.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}
281
282
// The filter starts with a live channel (|lost_| == false) and no outstanding
// asynchronous mailbox-name requests.
GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false),
      requested_mailboxes_(0) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}
289
290void GpuChannelHost::MessageFilter::AddRoute(
291    int route_id,
292    base::WeakPtr<IPC::Listener> listener,
293    scoped_refptr<MessageLoopProxy> loop) {
294  DCHECK(listeners_.find(route_id) == listeners_.end());
295  GpuListenerInfo info;
296  info.listener = listener;
297  info.loop = loop;
298  listeners_[route_id] = info;
299}
300
301void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
302  ListenerMap::iterator it = listeners_.find(route_id);
303  if (it != listeners_.end())
304    listeners_.erase(it);
305}
306
// Entry point for incoming messages. Returns true when the message was
// consumed here and should not be offered to anything else on the channel.
bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  if (message.routing_id() == MSG_ROUTING_CONTROL)
    return OnControlMessageReceived(message);

  ListenerMap::iterator it = listeners_.find(message.routing_id());

  if (it != listeners_.end()) {
    const GpuListenerInfo& info = it->second;
    // Forward to the listener on the loop it registered with. Because
    // |info.listener| is a WeakPtr, the task is a no-op if the listener has
    // been destroyed in the meantime.
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(
            base::IgnoreResult(&IPC::Listener::OnMessageReceived),
            info.listener,
            message));
  }

  // Routed messages with no registered listener are silently dropped but
  // still reported as handled.
  return true;
}
330
// Marks the channel lost and notifies every registered listener on its own
// loop.
void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to re-use
  // this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  // NOTE(review): |listeners_| is accessed here without |lock_| — presumably
  // it is only ever touched from the channel's own thread; confirm against
  // AddRoute/RemoveRoute call sites.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}
353
// Returns whether the channel has reported an error. |lost_| is guarded by
// |lock_|, so this may be called from any thread.
bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}
358
// Moves up to |num| names from the back of the local pool into |names| and
// returns how many replacement names the caller should request
// asynchronously. Returns 0 while the pool (counting requests already in
// flight) is at least half the ideal size.
size_t GpuChannelHost::MessageFilter::GetMailboxNames(
    size_t num, std::vector<gpu::Mailbox>* names) {
  AutoLock lock(lock_);
  // Hand out the last |count| pooled names.
  size_t count = std::min(num, mailbox_name_pool_.size());
  names->insert(names->begin(),
                mailbox_name_pool_.end() - count,
                mailbox_name_pool_.end());
  mailbox_name_pool_.erase(mailbox_name_pool_.end() - count,
                           mailbox_name_pool_.end());

  // Refill policy: once pool + outstanding requests drops below half the
  // ideal size, ask for enough names to bring the total back up to it.
  const size_t ideal_mailbox_pool_size = 100;
  size_t total = mailbox_name_pool_.size() + requested_mailboxes_;
  DCHECK_LE(total, ideal_mailbox_pool_size);
  if (total >= ideal_mailbox_pool_size / 2)
    return 0;
  size_t request = ideal_mailbox_pool_size - total;
  requested_mailboxes_ += request;
  return request;
}
378
// Dispatches control (non-routed) messages intercepted by this filter.
bool GpuChannelHost::MessageFilter::OnControlMessageReceived(
    const IPC::Message& message) {
  bool handled = true;

  IPC_BEGIN_MESSAGE_MAP(GpuChannelHost::MessageFilter, message)
  IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesReply,
                      OnGenerateMailboxNamesReply)
  IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  // Every control message reaching this filter is expected to have a handler
  // above.
  DCHECK(handled);
  return handled;
}
392
// Handles the asynchronous reply carrying freshly generated mailbox names:
// moves them into the pool and retires the corresponding outstanding-request
// count.
void GpuChannelHost::MessageFilter::OnGenerateMailboxNamesReply(
    const std::vector<gpu::Mailbox>& names) {
  TRACE_EVENT0("gpu", "OnGenerateMailboxNamesReply");
  AutoLock lock(lock_);
  // Replies should never deliver more names than were requested via
  // GetMailboxNames().
  DCHECK_LE(names.size(), requested_mailboxes_);
  requested_mailboxes_ -= names.size();
  mailbox_name_pool_.insert(mailbox_name_pool_.end(),
                            names.begin(),
                            names.end());
}
403
404
405}  // namespace content
406