// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/one_copy_raster_worker_pool.h"

#include <algorithm>

#include "base/debug/trace_event.h"
#include "base/debug/trace_event_argument.h"
#include "base/strings/stringprintf.h"
#include "cc/debug/traced_value.h"
#include "cc/resources/raster_buffer.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "third_party/skia/include/utils/SkNullCanvas.h"

namespace cc {
namespace {

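// RasterBuffer implementation that rasters into an intermediate staging
// resource acquired from |resource_pool|. The staging image is mapped for the
// lifetime of the buffer and its contents are copied into the output
// |resource| when the buffer is destroyed; that copy is the "one copy" this
// worker pool is named after.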
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   const Resource* resource)
      : resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        resource_(resource),
        raster_resource_(resource_pool->AcquireResource(resource->size())),
        buffer_(NULL),
        stride_(0) {
    // Acquire and map image for raster resource.
    resource_provider_->AcquireImage(raster_resource_->id());
    buffer_ = resource_provider_->MapImage(raster_resource_->id(), &stride_);
  }

  virtual ~RasterBufferImpl() {
    // First unmap image for raster resource.
    resource_provider_->UnmapImage(raster_resource_->id());

    // Copy contents of raster resource to |resource_|.
    resource_provider_->CopyResource(raster_resource_->id(), resource_->id());

    // This RasterBuffer implementation provides direct access to the memory
    // used by the GPU. Read lock fences are required to ensure that we're not
    // trying to map a resource that is currently in-use by the GPU.
    resource_provider_->EnableReadLockFences(raster_resource_->id());

    // Return raster resource to pool so it can be used by another RasterBuffer
    // instance.
    resource_pool_->ReleaseResource(raster_resource_.Pass());
  }

  // Overridden from RasterBuffer:
  virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() OVERRIDE {
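    // Mapping the staging image can fail, in which case raster into a null
    // canvas so the task can still run to completion without touching the
    // buffer.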
    if (!buffer_)
      return skia::AdoptRef(SkCreateNullCanvas());

    RasterWorkerPool::AcquireBitmapForBuffer(
        &bitmap_, buffer_, resource_->format(), resource_->size(), stride_);
    return skia::AdoptRef(new SkCanvas(bitmap_));
  }
  virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) OVERRIDE {
    if (!buffer_)
      return;

    RasterWorkerPool::ReleaseBitmapForBuffer(
        &bitmap_, buffer_, resource_->format());
  }

 private:
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* resource_;
  scoped_ptr<ScopedResource> raster_resource_;
  uint8_t* buffer_;
  int stride_;
  SkBitmap bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

}  // namespace

// static
scoped_ptr<RasterWorkerPool> OneCopyRasterWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool) {
  return make_scoped_ptr<RasterWorkerPool>(
      new OneCopyRasterWorkerPool(task_runner,
                                  task_graph_runner,
                                  context_provider,
                                  resource_provider,
                                  resource_pool));
}

OneCopyRasterWorkerPool::OneCopyRasterWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      raster_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}

OneCopyRasterWorkerPool::~OneCopyRasterWorkerPool() {
}

Rasterizer* OneCopyRasterWorkerPool::AsRasterizer() {
  return this;
}

void OneCopyRasterWorkerPool::SetClient(RasterizerClient* client) {
  client_ = client;
}

void OneCopyRasterWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::Shutdown");

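  // Schedule an empty task graph to cancel tasks that have not yet started,
  // then wait for any tasks that are currently running to finish.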
  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}

void OneCopyRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::ScheduleTasks");

  if (raster_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  raster_pending_.set();

  unsigned priority = kRasterTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnRasterFinished callbacks.
  raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyRasterWorkerPool::OnRasterFinished,
                   raster_finished_weak_ptr_factory_.GetWeakPtr(),
                   task_set));
  }

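  // Give the staging pool a chance to reclaim resources the GPU is done with
  // before new raster work is scheduled below.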
  resource_pool_->CheckBusyResources();

  for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end();
       ++it) {
    const RasterTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_raster_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_,
                      new_raster_finished_tasks[task_set].get(),
                      kRasterFinishedTaskPriority,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_raster_finished_tasks,
            new_raster_finished_tasks + kNumberOfTaskSets,
            raster_finished_tasks_);

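  // Trim the staging pool back toward its memory limits now that scheduling
  // is done.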
  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1(
      "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
}

void OneCopyRasterWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);
  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end();
       ++it) {
    RasterizerTask* task = static_cast<RasterizerTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();

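  // Flush the context so that work issued while completing tasks above, such
  // as the resource copies performed when raster buffers are released,
  // reaches the GPU service.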
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
}

scoped_ptr<RasterBuffer> OneCopyRasterWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  DCHECK_EQ(resource->format(), resource_pool_->resource_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(resource_provider_, resource_pool_, resource));
}

void OneCopyRasterWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

void OneCopyRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
  TRACE_EVENT1(
      "cc", "OneCopyRasterWorkerPool::OnRasterFinished", "task_set", task_set);

  DCHECK(raster_pending_[task_set]);
  raster_pending_[task_set] = false;
  if (raster_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1(
        "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTasks(task_set);
}

scoped_refptr<base::debug::ConvertableToTraceFormat>
OneCopyRasterWorkerPool::StateAsValue() const {
  scoped_refptr<base::debug::TracedValue> state =
      new base::debug::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(raster_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyRasterWorkerPool::StagingStateAsValueInto(
    base::debug::TracedValue* staging_state) const {
  staging_state->SetInteger("staging_resource_count",
                            resource_pool_->total_resource_count());
  staging_state->SetInteger("bytes_used_for_staging_resources",
                            resource_pool_->total_memory_usage_bytes());
  staging_state->SetInteger("pending_copy_count",
                            resource_pool_->total_resource_count() -
                                resource_pool_->acquired_resource_count());
  staging_state->SetInteger("bytes_pending_copy",
                            resource_pool_->total_memory_usage_bytes() -
                                resource_pool_->acquired_memory_usage_bytes());
}

}  // namespace cc