// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/renderer_gpu_video_decoder_factories.h"

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

#include "base/bind.h"
#include "content/child/child_thread.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/ipc/command_buffer_proxy.h"
#include "third_party/skia/include/core/SkPixelRef.h"

namespace content {

RendererGpuVideoDecoderFactories::~RendererGpuVideoDecoderFactories() {}

RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories(
    GpuChannelHost* gpu_channel_host,
    const scoped_refptr<base::MessageLoopProxy>& message_loop,
    WebGraphicsContext3DCommandBufferImpl* context)
    : message_loop_(message_loop),
      main_message_loop_(base::MessageLoopProxy::current()),
      gpu_channel_host_(gpu_channel_host),
      aborted_waiter_(true, false),
      message_loop_async_waiter_(false, false),
      render_thread_async_waiter_(false, false) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncGetContext(context);
    message_loop_async_waiter_.Reset();
    return;
  }
  // Wait for the context to be acquired.
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &RendererGpuVideoDecoderFactories::AsyncGetContext,
      // Unretained to avoid ref/deref'ing |*this|, which is not yet stored in a
      // scoped_refptr.  Safe because the Wait() below keeps us alive until this
      // task completes.
      base::Unretained(this),
      // OK to pass raw because the pointee is only deleted on the compositor
      // thread, and only as the result of a PostTask from the render thread
      // which can only happen after this function returns, so our PostTask will
      // run first.
      context));
  message_loop_async_waiter_.Wait();
}

RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories()
    : aborted_waiter_(true, false),
      message_loop_async_waiter_(false, false),
      render_thread_async_waiter_(false, false) {}

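// Runs on |message_loop_|.  Caches a weak pointer to |context|, makes the
// context current, and signals |message_loop_async_waiter_| so the blocked
// constructor can proceed.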
void RendererGpuVideoDecoderFactories::AsyncGetContext(
    WebGraphicsContext3DCommandBufferImpl* context) {
  context_ = context->AsWeakPtr();
  if (context_.get()) {
    if (context_->makeContextCurrent()) {
      // Called once per media player, but is a no-op after the first one in
      // each renderer.
      context_->insertEventMarkerEXT("GpuVDAContext3D");
    }
  }
  message_loop_async_waiter_.Signal();
}

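// May be called from any thread.  Unless we are already on |message_loop_|,
// this blocks until AsyncCreateVideoDecodeAccelerator() has run there (or
// until Abort() is signaled), then hands ownership of the new VDA to the
// caller.  Returns NULL on abort.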
media::VideoDecodeAccelerator*
RendererGpuVideoDecoderFactories::CreateVideoDecodeAccelerator(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncCreateVideoDecodeAccelerator(profile, client);
    message_loop_async_waiter_.Reset();
    return vda_.release();
  }
  // AsyncCreateVideoDecodeAccelerator() stores the created VDA in the |vda_|
  // member, which is released to the caller below.
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator,
      this, profile, client));

  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &message_loop_async_waiter_};
  if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) {
    // We are aborting, but AsyncCreateVideoDecodeAccelerator() may still
    // create the VDA later; post a task so that it is destroyed on the same
    // thread it was created on.
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator,
        this));
    return NULL;
  }
  return vda_.release();
}

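// Runs on |message_loop_|.  Creates the VDA through the GPU channel (if the
// context is still alive), stores it in |vda_| and signals the waiter so the
// blocked caller can pick it up.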
void RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  DCHECK(message_loop_->BelongsToCurrentThread());

  if (context_.get() && context_->GetCommandBufferProxy()) {
    vda_ = gpu_channel_host_->CreateVideoDecoder(
        context_->GetCommandBufferProxy()->GetRouteID(), profile, client);
  }
  message_loop_async_waiter_.Signal();
}

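// Creates |count| textures of |size| bound to |texture_target| on
// |message_loop_|, blocking the calling thread until the texture ids and
// mailboxes are available.  Returns a sync point to wait on before the
// textures may be used, or 0 if we were aborted.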
uint32 RendererGpuVideoDecoderFactories::CreateTextures(
    int32 count, const gfx::Size& size,
    std::vector<uint32>* texture_ids,
    std::vector<gpu::Mailbox>* texture_mailboxes,
    uint32 texture_target) {
  uint32 sync_point = 0;

  if (message_loop_->BelongsToCurrentThread()) {
    AsyncCreateTextures(count, size, texture_target, &sync_point);
    texture_ids->swap(created_textures_);
    texture_mailboxes->swap(created_texture_mailboxes_);
    message_loop_async_waiter_.Reset();
    return sync_point;
  }
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &RendererGpuVideoDecoderFactories::AsyncCreateTextures, this,
      count, size, texture_target, &sync_point));

  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &message_loop_async_waiter_};
  if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
    return 0;
  texture_ids->swap(created_textures_);
  texture_mailboxes->swap(created_texture_mailboxes_);
  return sync_point;
}

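// Runs on |message_loop_|.  Generates |count| textures, produces a mailbox
// for each one and inserts a sync point so the decoder in the GPU process
// can safely consume them.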
void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
    int32 count, const gfx::Size& size, uint32 texture_target,
    uint32* sync_point) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  DCHECK(texture_target);

  if (!context_.get()) {
    message_loop_async_waiter_.Signal();
    return;
  }
  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
  created_textures_.resize(count);
  created_texture_mailboxes_.resize(count);
  gles2->GenTextures(count, &created_textures_[0]);
  for (int i = 0; i < count; ++i) {
    gles2->ActiveTexture(GL_TEXTURE0);
    uint32 texture_id = created_textures_[i];
    gles2->BindTexture(texture_target, texture_id);
    gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    if (texture_target == GL_TEXTURE_2D) {
      gles2->TexImage2D(texture_target, 0, GL_RGBA, size.width(), size.height(),
                        0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    gles2->GenMailboxCHROMIUM(created_texture_mailboxes_[i].name);
    gles2->ProduceTextureCHROMIUM(texture_target,
                                  created_texture_mailboxes_[i].name);
  }

  // We need a glFlush here to guarantee the decoder (in the GPU process) can
  // use the texture ids we return here.  Since textures are expected to be
  // reused, this should not be unacceptably expensive.
  gles2->Flush();
  DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));

  *sync_point = gles2->InsertSyncPointCHROMIUM();
  message_loop_async_waiter_.Signal();
}

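// Fire-and-forget: deletes |texture_id| on |message_loop_| without blocking
// the caller.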
void RendererGpuVideoDecoderFactories::DeleteTexture(uint32 texture_id) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncDeleteTexture(texture_id);
    return;
  }
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &RendererGpuVideoDecoderFactories::AsyncDeleteTexture, this, texture_id));
}

void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!context_.get())
    return;

  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
  gles2->DeleteTextures(1, &texture_id);
  DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
}

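// Blocks the calling thread until a wait on |sync_point| has been inserted
// into the command stream on |message_loop_| (or until Abort() is signaled).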
void RendererGpuVideoDecoderFactories::WaitSyncPoint(uint32 sync_point) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncWaitSyncPoint(sync_point);
    message_loop_async_waiter_.Reset();
    return;
  }

  message_loop_->PostTask(FROM_HERE, base::Bind(
      &RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint,
      this,
      sync_point));
  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &message_loop_async_waiter_};
  base::WaitableEvent::WaitMany(objects, arraysize(objects));
}

void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!context_.get()) {
    message_loop_async_waiter_.Signal();
    return;
  }

  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
  gles2->WaitSyncPointCHROMIUM(sync_point);
  message_loop_async_waiter_.Signal();
}

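// Reads the contents of |texture_id| back into |pixels|, blocking the calling
// thread until AsyncReadPixels() has run on |message_loop_| (unless aborted).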
void RendererGpuVideoDecoderFactories::ReadPixels(
    uint32 texture_id, uint32 texture_target, const gfx::Size& size,
    const SkBitmap& pixels) {
  // SkBitmaps use the SkPixelRef object to refcount the underlying pixels.
  // Multiple SkBitmaps can share a SkPixelRef instance. We use this to
  // ensure that the underlying pixels in the SkBitmap passed in remain valid
  // until the AsyncReadPixels() call completes.
  read_pixels_bitmap_.setPixelRef(pixels.pixelRef());

  if (!message_loop_->BelongsToCurrentThread()) {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &RendererGpuVideoDecoderFactories::AsyncReadPixels, this,
        texture_id, texture_target, size));
    base::WaitableEvent* objects[] = {&aborted_waiter_,
                                      &message_loop_async_waiter_};
    if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
      return;
  } else {
    AsyncReadPixels(texture_id, texture_target, size);
    message_loop_async_waiter_.Reset();
  }
  read_pixels_bitmap_.setPixelRef(NULL);
}

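// Runs on |message_loop_|.  Copies the source texture into a temporary
// texture, attaches it to a framebuffer and reads the pixels back into the
// SkBitmap that ReadPixels() pinned via |read_pixels_bitmap_|.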
void RendererGpuVideoDecoderFactories::AsyncReadPixels(
    uint32 texture_id, uint32 texture_target, const gfx::Size& size) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!context_.get()) {
    message_loop_async_waiter_.Signal();
    return;
  }

  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();

  GLuint tmp_texture;
  gles2->GenTextures(1, &tmp_texture);
  gles2->BindTexture(texture_target, tmp_texture);
  gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
  context_->copyTextureCHROMIUM(
      texture_target, texture_id, tmp_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE);

  GLuint fb;
  gles2->GenFramebuffers(1, &fb);
  gles2->BindFramebuffer(GL_FRAMEBUFFER, fb);
  gles2->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                              texture_target, tmp_texture, 0);
  gles2->PixelStorei(GL_PACK_ALIGNMENT, 4);
  gles2->ReadPixels(0, 0, size.width(), size.height(), GL_BGRA_EXT,
                    GL_UNSIGNED_BYTE, read_pixels_bitmap_.pixelRef()->pixels());
  gles2->DeleteFramebuffers(1, &fb);
  gles2->DeleteTextures(1, &tmp_texture);
  DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
  message_loop_async_waiter_.Signal();
}

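// Allocates a shared memory segment of |size| bytes via the render thread's
// ChildThread, blocking the calling thread if we are not already on the main
// message loop.  Returns NULL if we were aborted.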
base::SharedMemory* RendererGpuVideoDecoderFactories::CreateSharedMemory(
    size_t size) {
  if (main_message_loop_->BelongsToCurrentThread()) {
    return ChildThread::current()->AllocateSharedMemory(size);
  }
  main_message_loop_->PostTask(FROM_HERE, base::Bind(
      &RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory, this,
      size));

  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &render_thread_async_waiter_};
  if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
    return NULL;
  return shared_memory_segment_.release();
}

void RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory(size_t size) {
  DCHECK_EQ(base::MessageLoop::current(),
            ChildThread::current()->message_loop());

  shared_memory_segment_.reset(
      ChildThread::current()->AllocateSharedMemory(size));
  render_thread_async_waiter_.Signal();
}

scoped_refptr<base::MessageLoopProxy>
RendererGpuVideoDecoderFactories::GetMessageLoop() {
  return message_loop_;
}

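// |aborted_waiter_| is manual-reset, so once signaled it stays signaled and
// every pending or future synchronous call above returns early.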
void RendererGpuVideoDecoderFactories::Abort() {
  aborted_waiter_.Signal();
}

bool RendererGpuVideoDecoderFactories::IsAborted() {
  return aborted_waiter_.IsSignaled();
}

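// Returns a new factories instance that shares this object's message loops,
// GPU channel and context, built via the default constructor above.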
scoped_refptr<media::GpuVideoDecoderFactories>
RendererGpuVideoDecoderFactories::Clone() {
  scoped_refptr<RendererGpuVideoDecoderFactories> factories =
      new RendererGpuVideoDecoderFactories();
  factories->message_loop_ = message_loop_;
  factories->main_message_loop_ = main_message_loop_;
  factories->gpu_channel_host_ = gpu_channel_host_;
  factories->context_ = context_;
  return factories;
}

void RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator() {
  // OK to release because Destroy() will delete the VDA instance.
  if (vda_)
    vda_.release()->Destroy();
}

}  // namespace content