tile_manager.cc revision 5c02ac1a9c1b504631c0a3d2b6e737b5d738bae1
// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/tile_manager.h"

#include <algorithm>
#include <limits>
#include <string>

#include "base/bind.h"
#include "base/json/json_writer.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "cc/debug/devtools_instrumentation.h"
#include "cc/debug/frame_viewer_instrumentation.h"
#include "cc/debug/traced_value.h"
#include "cc/layers/picture_layer_impl.h"
#include "cc/resources/raster_worker_pool.h"
#include "cc/resources/rasterizer_delegate.h"
#include "cc/resources/tile.h"
#include "skia/ext/paint_simplifier.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkPixelRef.h"
#include "ui/gfx/rect_conversions.h"

namespace cc {
namespace {

// Flag to indicate whether we should try and detect that
// a tile is of solid color.
const bool kUseColorEstimator = true;

// Minimum width/height of a pile that would require analysis for tiles.
// NOTE(review): not referenced anywhere in this portion of the file --
// confirm it is still used before relying on or removing it.
const int kMinDimensionsForAnalysis = 256;

// Draw filter that turns off subpixel (LCD) rendering for text draws while
// leaving every other draw type untouched. Installed on the raster canvas
// for HIGH_QUALITY_NO_LCD_RASTER_MODE (see RasterTaskImpl::Raster below).
class DisableLCDTextFilter : public SkDrawFilter {
 public:
  // SkDrawFilter interface.
  virtual bool filter(SkPaint* paint, SkDrawFilter::Type type) OVERRIDE {
    // Returning true keeps the draw; the paint is only mutated for text.
    if (type != SkDrawFilter::kText_Type)
      return true;

    paint->setLCDRenderText(false);
    return true;
  }
};

// Worker-thread task that optionally analyzes a picture pile (e.g. for
// solid-color detection) and, unless the analysis found a solid color,
// rasters it into a canvas acquired from the client on the origin thread.
// |reply| is run back on the origin thread with the analysis result and
// whether the task was canceled.
class RasterTaskImpl : public RasterTask {
 public:
  RasterTaskImpl(
      const Resource* resource,
      PicturePileImpl* picture_pile,
      const gfx::Rect& content_rect,
      float contents_scale,
      RasterMode raster_mode,
      TileResolution tile_resolution,
      int layer_id,
      const void* tile_id,
      int source_frame_number,
      bool analyze_picture,
      RenderingStatsInstrumentation* rendering_stats,
      const base::Callback<void(const PicturePileImpl::Analysis&, bool)>& reply,
      ImageDecodeTask::Vector* dependencies)
      : RasterTask(resource, dependencies),
        picture_pile_(picture_pile),
        content_rect_(content_rect),
        contents_scale_(contents_scale),
        raster_mode_(raster_mode),
        tile_resolution_(tile_resolution),
        layer_id_(layer_id),
        tile_id_(tile_id),
        source_frame_number_(source_frame_number),
        analyze_picture_(analyze_picture),
        rendering_stats_(rendering_stats),
        reply_(reply),
        canvas_(NULL) {}

  // Overridden from Task:
  virtual void RunOnWorkerThread() OVERRIDE {
    TRACE_EVENT0("cc", "RasterizerTaskImpl::RunOnWorkerThread");

    DCHECK(picture_pile_);
    // |canvas_| is NULL if ScheduleOnOriginThread failed to acquire one;
    // in that case the task is a no-op.
    if (canvas_) {
      AnalyzeAndRaster(picture_pile_->GetCloneForDrawingOnThread(
          RasterWorkerPool::GetPictureCloneIndexForCurrentThread()));
    }
  }

  // Overridden from RasterizerTask:
  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
    DCHECK(!canvas_);
    canvas_ = client->AcquireCanvasForRaster(this);
  }
  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
    canvas_ = NULL;
    client->ReleaseCanvasForRaster(this);
  }
  virtual void RunReplyOnOriginThread() OVERRIDE {
    DCHECK(!canvas_);
    reply_.Run(analysis_, !HasFinishedRunning());
  }

 protected:
  virtual ~RasterTaskImpl() { DCHECK(!canvas_); }

 private:
  // Runs the optional solid-color analysis; skips rastering entirely when
  // the pile was determined to be a single solid color.
  void AnalyzeAndRaster(PicturePileImpl* picture_pile) {
    DCHECK(picture_pile);
    DCHECK(canvas_);

    if (analyze_picture_) {
      Analyze(picture_pile);
      if (analysis_.is_solid_color)
        return;
    }

    Raster(picture_pile);
  }

  void Analyze(PicturePileImpl* picture_pile) {
    frame_viewer_instrumentation::ScopedAnalyzeTask analyze_task(
        tile_id_, tile_resolution_, source_frame_number_, layer_id_);

    DCHECK(picture_pile);

    picture_pile->AnalyzeInRect(
        content_rect_, contents_scale_, &analysis_, rendering_stats_);

    // Record the solid color prediction.
    UMA_HISTOGRAM_BOOLEAN("Renderer4.SolidColorTilesAnalyzed",
                          analysis_.is_solid_color);

    // Clear the flag if we're not using the estimator.
    analysis_.is_solid_color &= kUseColorEstimator;
  }

  void Raster(PicturePileImpl* picture_pile) {
    frame_viewer_instrumentation::ScopedRasterTask raster_task(
        tile_id_,
        tile_resolution_,
        source_frame_number_,
        layer_id_,
        raster_mode_);
    devtools_instrumentation::ScopedLayerTask layer_task(
        devtools_instrumentation::kRasterTask, layer_id_);

    // Pick a draw filter appropriate for the raster mode; HIGH_QUALITY
    // rasters with no filter at all.
    skia::RefPtr<SkDrawFilter> draw_filter;
    switch (raster_mode_) {
      case LOW_QUALITY_RASTER_MODE:
        draw_filter = skia::AdoptRef(new skia::PaintSimplifier);
        break;
      case HIGH_QUALITY_NO_LCD_RASTER_MODE:
        draw_filter = skia::AdoptRef(new DisableLCDTextFilter);
        break;
      case HIGH_QUALITY_RASTER_MODE:
        break;
      case NUM_RASTER_MODES:
      default:
        NOTREACHED();
    }
    canvas_->setDrawFilter(draw_filter.get());

    base::TimeDelta prev_rasterize_time =
        rendering_stats_->impl_thread_rendering_stats().rasterize_time;

    // Only record rasterization time for highres tiles, because
    // lowres tiles are not required for activation and therefore
    // introduce noise in the measurement (sometimes they get rasterized
    // before we draw and sometimes they aren't).
    RenderingStatsInstrumentation* stats =
        tile_resolution_ == HIGH_RESOLUTION ? rendering_stats_ : NULL;
    DCHECK(picture_pile);
    picture_pile->RasterToBitmap(
        canvas_, content_rect_, contents_scale_, stats);

    if (rendering_stats_->record_rendering_stats()) {
      base::TimeDelta current_rasterize_time =
          rendering_stats_->impl_thread_rendering_stats().rasterize_time;
      HISTOGRAM_CUSTOM_COUNTS(
          "Renderer4.PictureRasterTimeUS",
          (current_rasterize_time - prev_rasterize_time).InMicroseconds(),
          0,
          100000,
          100);
    }
  }

  PicturePileImpl::Analysis analysis_;
  scoped_refptr<PicturePileImpl> picture_pile_;
  gfx::Rect content_rect_;
  float contents_scale_;
  RasterMode raster_mode_;
  TileResolution tile_resolution_;
  int layer_id_;
  const void* tile_id_;
  int source_frame_number_;
  bool analyze_picture_;
  RenderingStatsInstrumentation* rendering_stats_;
  const base::Callback<void(const PicturePileImpl::Analysis&, bool)> reply_;
  // Not owned; acquired/released through the RasterizerTaskClient and only
  // non-NULL between ScheduleOnOriginThread and CompleteOnOriginThread.
  SkCanvas* canvas_;

  DISALLOW_COPY_AND_ASSIGN(RasterTaskImpl);
};

// Worker-thread task that forces decode of a single image by locking and
// immediately unlocking its SkPixelRef. |reply| runs on the origin thread
// with whether the task was canceled.
class ImageDecodeTaskImpl : public ImageDecodeTask {
 public:
  ImageDecodeTaskImpl(SkPixelRef* pixel_ref,
                      int layer_id,
                      RenderingStatsInstrumentation* rendering_stats,
                      const base::Callback<void(bool was_canceled)>& reply)
      : pixel_ref_(skia::SharePtr(pixel_ref)),
        layer_id_(layer_id),
        rendering_stats_(rendering_stats),
        reply_(reply) {}

  // Overridden from Task:
  virtual void RunOnWorkerThread() OVERRIDE {
    TRACE_EVENT0("cc", "ImageDecodeTaskImpl::RunOnWorkerThread");

    devtools_instrumentation::ScopedImageDecodeTask image_decode_task(
        pixel_ref_.get());
    // This will cause the image referred to by pixel ref to be decoded.
    pixel_ref_->lockPixels();
    pixel_ref_->unlockPixels();
  }

  // Overridden from RasterizerTask:
  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
  virtual void RunReplyOnOriginThread() OVERRIDE {
    reply_.Run(!HasFinishedRunning());
  }

 protected:
  virtual ~ImageDecodeTaskImpl() {}

 private:
  skia::RefPtr<SkPixelRef> pixel_ref_;
  int layer_id_;
  RenderingStatsInstrumentation* rendering_stats_;
  const base::Callback<void(bool was_canceled)> reply_;

  DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl);
};

// Upper bound on the number of tiles queued for rasterization in a single
// AssignGpuMemoryToTiles pass.
const size_t kScheduledRasterTasksLimit = 32u;

// Memory limit policy works by mapping some bin states to the NEVER bin.
const ManagedTileBin kBinPolicyMap[NUM_TILE_MEMORY_LIMIT_POLICIES][NUM_BINS] = {
    // [ALLOW_NOTHING]
    {NEVER_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NEVER_BIN,  // [NOW_BIN]
     NEVER_BIN,  // [SOON_BIN]
     NEVER_BIN,  // [EVENTUALLY_AND_ACTIVE_BIN]
     NEVER_BIN,  // [EVENTUALLY_BIN]
     NEVER_BIN,  // [AT_LAST_AND_ACTIVE_BIN]
     NEVER_BIN,  // [AT_LAST_BIN]
     NEVER_BIN   // [NEVER_BIN]
    },
    // [ALLOW_ABSOLUTE_MINIMUM]
    {NOW_AND_READY_TO_DRAW_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NOW_BIN,                    // [NOW_BIN]
     NEVER_BIN,                  // [SOON_BIN]
     NEVER_BIN,                  // [EVENTUALLY_AND_ACTIVE_BIN]
     NEVER_BIN,                  // [EVENTUALLY_BIN]
     NEVER_BIN,                  // [AT_LAST_AND_ACTIVE_BIN]
     NEVER_BIN,                  // [AT_LAST_BIN]
     NEVER_BIN                   // [NEVER_BIN]
    },
    // [ALLOW_PREPAINT_ONLY]
    {NOW_AND_READY_TO_DRAW_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NOW_BIN,                    // [NOW_BIN]
     SOON_BIN,                   // [SOON_BIN]
     NEVER_BIN,                  // [EVENTUALLY_AND_ACTIVE_BIN]
     NEVER_BIN,                  // [EVENTUALLY_BIN]
     NEVER_BIN,                  // [AT_LAST_AND_ACTIVE_BIN]
     NEVER_BIN,                  // [AT_LAST_BIN]
     NEVER_BIN                   // [NEVER_BIN]
    },
    // [ALLOW_ANYTHING]
    {NOW_AND_READY_TO_DRAW_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NOW_BIN,                    // [NOW_BIN]
     SOON_BIN,                   // [SOON_BIN]
     EVENTUALLY_AND_ACTIVE_BIN,  // [EVENTUALLY_AND_ACTIVE_BIN]
     EVENTUALLY_BIN,             // [EVENTUALLY_BIN]
     AT_LAST_AND_ACTIVE_BIN,     // [AT_LAST_AND_ACTIVE_BIN]
     AT_LAST_BIN,                // [AT_LAST_BIN]
     NEVER_BIN                   // [NEVER_BIN]
    }};

// Ready to draw works by mapping NOW_BIN to NOW_AND_READY_TO_DRAW_BIN.
const ManagedTileBin kBinReadyToDrawMap[2][NUM_BINS] = {
    // Not ready
    {NOW_AND_READY_TO_DRAW_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NOW_BIN,                    // [NOW_BIN]
     SOON_BIN,                   // [SOON_BIN]
     EVENTUALLY_AND_ACTIVE_BIN,  // [EVENTUALLY_AND_ACTIVE_BIN]
     EVENTUALLY_BIN,             // [EVENTUALLY_BIN]
     AT_LAST_AND_ACTIVE_BIN,     // [AT_LAST_AND_ACTIVE_BIN]
     AT_LAST_BIN,                // [AT_LAST_BIN]
     NEVER_BIN                   // [NEVER_BIN]
    },
    // Ready
    {NOW_AND_READY_TO_DRAW_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NOW_AND_READY_TO_DRAW_BIN,  // [NOW_BIN]
     SOON_BIN,                   // [SOON_BIN]
     EVENTUALLY_AND_ACTIVE_BIN,  // [EVENTUALLY_AND_ACTIVE_BIN]
     EVENTUALLY_BIN,             // [EVENTUALLY_BIN]
     AT_LAST_AND_ACTIVE_BIN,     // [AT_LAST_AND_ACTIVE_BIN]
     AT_LAST_BIN,                // [AT_LAST_BIN]
     NEVER_BIN                   // [NEVER_BIN]
    }};

// Active works by mapping some bin states to equivalent _ACTIVE_BIN state.
const ManagedTileBin kBinIsActiveMap[2][NUM_BINS] = {
    // Inactive
    {NOW_AND_READY_TO_DRAW_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NOW_BIN,                    // [NOW_BIN]
     SOON_BIN,                   // [SOON_BIN]
     EVENTUALLY_AND_ACTIVE_BIN,  // [EVENTUALLY_AND_ACTIVE_BIN]
     EVENTUALLY_BIN,             // [EVENTUALLY_BIN]
     AT_LAST_AND_ACTIVE_BIN,     // [AT_LAST_AND_ACTIVE_BIN]
     AT_LAST_BIN,                // [AT_LAST_BIN]
     NEVER_BIN                   // [NEVER_BIN]
    },
    // Active
    {NOW_AND_READY_TO_DRAW_BIN,  // [NOW_AND_READY_TO_DRAW_BIN]
     NOW_BIN,                    // [NOW_BIN]
     SOON_BIN,                   // [SOON_BIN]
     EVENTUALLY_AND_ACTIVE_BIN,  // [EVENTUALLY_AND_ACTIVE_BIN]
     EVENTUALLY_AND_ACTIVE_BIN,  // [EVENTUALLY_BIN]
     AT_LAST_AND_ACTIVE_BIN,     // [AT_LAST_AND_ACTIVE_BIN]
     AT_LAST_AND_ACTIVE_BIN,     // [AT_LAST_BIN]
     NEVER_BIN                   // [NEVER_BIN]
    }};

// Determine bin based on three categories of tiles: things we need now,
// things we need soon, and eventually.
inline ManagedTileBin BinFromTilePriority(const TilePriority& prio) {
  // Tiles within this distance of the viewport are promoted to SOON_BIN to
  // guard against quick direction reversals ("backflings").
  const float kBackflingGuardDistancePixels = 314.0f;

  if (prio.priority_bin == TilePriority::NOW)
    return NOW_BIN;

  if (prio.priority_bin == TilePriority::SOON ||
      prio.distance_to_visible < kBackflingGuardDistancePixels)
    return SOON_BIN;

  // Infinitely-far tiles will never be needed.
  if (prio.distance_to_visible == std::numeric_limits<float>::infinity())
    return NEVER_BIN;

  return EVENTUALLY_BIN;
}

}  // namespace

RasterTaskCompletionStats::RasterTaskCompletionStats()
    : completed_count(0u), canceled_count(0u) {}

// Converts completion stats into a dictionary value for tracing.
scoped_ptr<base::Value> RasterTaskCompletionStatsAsValue(
    const RasterTaskCompletionStats& stats) {
  scoped_ptr<base::DictionaryValue> state(new base::DictionaryValue());
  state->SetInteger("completed_count", stats.completed_count);
  state->SetInteger("canceled_count", stats.canceled_count);
  return state.PassAs<base::Value>();
}

// static
scoped_ptr<TileManager> TileManager::Create(
    TileManagerClient* client,
    ResourcePool* resource_pool,
    Rasterizer* rasterizer,
    Rasterizer* gpu_rasterizer,
    size_t max_raster_usage_bytes,
    bool use_rasterize_on_demand,
    RenderingStatsInstrumentation* rendering_stats_instrumentation) {
  return make_scoped_ptr(new TileManager(client,
                                         resource_pool,
                                         rasterizer,
                                         gpu_rasterizer,
                                         max_raster_usage_bytes,
                                         use_rasterize_on_demand,
                                         rendering_stats_instrumentation));
}

TileManager::TileManager(
    TileManagerClient* client,
    ResourcePool* resource_pool,
    Rasterizer* rasterizer,
    Rasterizer* gpu_rasterizer,
    size_t max_raster_usage_bytes,
    bool use_rasterize_on_demand,
    RenderingStatsInstrumentation* rendering_stats_instrumentation)
    : client_(client),
      resource_pool_(resource_pool),
      prioritized_tiles_dirty_(false),
      all_tiles_that_need_to_be_rasterized_have_memory_(true),
      all_tiles_required_for_activation_have_memory_(true),
      memory_required_bytes_(0),
      memory_nice_to_have_bytes_(0),
      bytes_releasable_(0),
      resources_releasable_(0),
      max_raster_usage_bytes_(max_raster_usage_bytes),
      ever_exceeded_memory_budget_(false),
      rendering_stats_instrumentation_(rendering_stats_instrumentation),
      did_initialize_visible_tile_(false),
      did_check_for_completed_tasks_since_last_schedule_tasks_(true),
      use_rasterize_on_demand_(use_rasterize_on_demand) {
  // The delegate fans scheduling out to the software and GPU rasterizers.
  Rasterizer* rasterizers[NUM_RASTERIZER_TYPES] = {
      rasterizer,      // RASTERIZER_TYPE_DEFAULT
      gpu_rasterizer,  // RASTERIZER_TYPE_GPU
  };
  rasterizer_delegate_ =
      RasterizerDelegate::Create(this, rasterizers, arraysize(rasterizers));
}

TileManager::~TileManager() {
  // Reset global state and manage. This should cause
  // our memory usage to drop to zero.
  global_state_ = GlobalStateThatImpactsTilePriority();

  CleanUpReleasedTiles();
  DCHECK_EQ(0u, tiles_.size());

  // Schedule an empty queue to cancel everything outstanding.
  RasterTaskQueue empty[NUM_RASTERIZER_TYPES];
  rasterizer_delegate_->ScheduleTasks(empty);
  orphan_raster_tasks_.clear();

  // This should finish all pending tasks and release any uninitialized
  // resources.
  rasterizer_delegate_->Shutdown();
  rasterizer_delegate_->CheckForCompletedTasks();

  DCHECK_EQ(0u, bytes_releasable_);
  DCHECK_EQ(0u, resources_releasable_);

  for (std::vector<PictureLayerImpl*>::iterator it = layers_.begin();
       it != layers_.end();
       ++it) {
    (*it)->DidUnregisterLayer();
  }
  layers_.clear();
}

// Marks |tile| for deferred destruction; actual cleanup happens in
// CleanUpReleasedTiles().
void TileManager::Release(Tile* tile) {
  prioritized_tiles_dirty_ = true;
  released_tiles_.push_back(tile);
}

void TileManager::DidChangeTilePriority(Tile* tile) {
  prioritized_tiles_dirty_ = true;
}

bool TileManager::ShouldForceTasksRequiredForActivationToComplete() const {
  return global_state_.tree_priority != SMOOTHNESS_TAKES_PRIORITY;
}

// Frees resources, orphans in-flight raster tasks and deletes every tile
// previously passed to Release().
void TileManager::CleanUpReleasedTiles() {
  for (std::vector<Tile*>::iterator it = released_tiles_.begin();
       it != released_tiles_.end();
       ++it) {
    Tile* tile = *it;
    ManagedTileState& mts = tile->managed_state();

    // Keep in-flight raster tasks alive as orphans until they complete.
    for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
      FreeResourceForTile(tile, static_cast<RasterMode>(mode));
      orphan_raster_tasks_.push_back(mts.tile_versions[mode].raster_task_);
    }

    DCHECK(tiles_.find(tile->id()) != tiles_.end());
    tiles_.erase(tile->id());

    // Drop cached image decode tasks when the last tile of a layer goes
    // away.
    LayerCountMap::iterator layer_it =
        used_layer_counts_.find(tile->layer_id());
    DCHECK_GT(layer_it->second, 0);
    if (--layer_it->second == 0) {
      used_layer_counts_.erase(layer_it);
      image_decode_tasks_.erase(tile->layer_id());
    }

    delete tile;
  }

  released_tiles_.clear();
}

// Rebuilds |prioritized_tiles_| lazily; a no-op unless a priority changed
// or a tile was released since the last rebuild.
void TileManager::UpdatePrioritizedTileSetIfNeeded() {
  if (!prioritized_tiles_dirty_)
    return;

  CleanUpReleasedTiles();

  prioritized_tiles_.Clear();
  GetTilesWithAssignedBins(&prioritized_tiles_);
  prioritized_tiles_dirty_ = false;
}

void TileManager::DidFinishRunningTasks() {
  TRACE_EVENT0("cc", "TileManager::DidFinishRunningTasks");

  bool memory_usage_above_limit = resource_pool_->total_memory_usage_bytes() >
                                  global_state_.soft_memory_limit_in_bytes;

  // When OOM, keep re-assigning memory until we reach a steady state
  // where top-priority tiles are initialized.
  if (all_tiles_that_need_to_be_rasterized_have_memory_ &&
      !memory_usage_above_limit)
    return;

  rasterizer_delegate_->CheckForCompletedTasks();
  did_check_for_completed_tasks_since_last_schedule_tasks_ = true;

  TileVector tiles_that_need_to_be_rasterized;
  AssignGpuMemoryToTiles(&prioritized_tiles_,
                         &tiles_that_need_to_be_rasterized);

  // |tiles_that_need_to_be_rasterized| will be empty when we reach a
  // steady memory state. Keep scheduling tasks until we reach this state.
  if (!tiles_that_need_to_be_rasterized.empty()) {
    ScheduleTasks(tiles_that_need_to_be_rasterized);
    return;
  }

  resource_pool_->ReduceResourceUsage();

  // We don't reserve memory for required-for-activation tiles during
  // accelerated gestures, so we just postpone activation when we don't
  // have these tiles, and activate after the accelerated gesture.
  bool allow_rasterize_on_demand =
      global_state_.tree_priority != SMOOTHNESS_TAKES_PRIORITY;

  // Use on-demand raster for any required-for-activation tiles that have not
  // been assigned memory after reaching a steady memory state. This
  // ensures that we activate even when OOM.
  for (TileMap::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
    Tile* tile = it->second;
    ManagedTileState& mts = tile->managed_state();
    ManagedTileState::TileVersion& tile_version =
        mts.tile_versions[mts.raster_mode];

    if (tile->required_for_activation() && !tile_version.IsReadyToDraw()) {
      // If we can't raster on demand, give up early (and don't activate).
      if (!allow_rasterize_on_demand)
        return;
      if (use_rasterize_on_demand_)
        tile_version.set_rasterize_on_demand();
    }
  }

  client_->NotifyReadyToActivate();
}

void TileManager::DidFinishRunningTasksRequiredForActivation() {
  // This is only a true indication that all tiles required for
  // activation are initialized when no tiles are OOM. We need to
  // wait for DidFinishRunningTasks() to be called, try to re-assign
  // memory and in worst case use on-demand raster when tiles
  // required for activation are OOM.
  if (!all_tiles_required_for_activation_have_memory_)
    return;

  client_->NotifyReadyToActivate();
}

// Assigns a ManagedTileBin to every live tile (combining active- and
// pending-tree priorities under the current memory policy) and inserts the
// tiles that may still need memory/raster work into |tiles|. Also
// recomputes the memory_required/nice-to-have byte counters.
void TileManager::GetTilesWithAssignedBins(PrioritizedTileSet* tiles) {
  TRACE_EVENT0("cc", "TileManager::GetTilesWithAssignedBins");

  // Compute new stats to be returned by GetMemoryStats().
  memory_required_bytes_ = 0;
  memory_nice_to_have_bytes_ = 0;

  const TileMemoryLimitPolicy memory_policy = global_state_.memory_limit_policy;
  const TreePriority tree_priority = global_state_.tree_priority;

  // For each tree, bin into different categories of tiles.
  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
    Tile* tile = it->second;
    ManagedTileState& mts = tile->managed_state();

    const ManagedTileState::TileVersion& tile_version =
        tile->GetTileVersionForDrawing();
    bool tile_is_ready_to_draw = tile_version.IsReadyToDraw();
    bool tile_is_active = tile_is_ready_to_draw ||
                          mts.tile_versions[mts.raster_mode].raster_task_;

    // Get the active priority and bin.
    TilePriority active_priority = tile->priority(ACTIVE_TREE);
    ManagedTileBin active_bin = BinFromTilePriority(active_priority);

    // Get the pending priority and bin.
    TilePriority pending_priority = tile->priority(PENDING_TREE);
    ManagedTileBin pending_bin = BinFromTilePriority(pending_priority);

    bool pending_is_low_res = pending_priority.resolution == LOW_RESOLUTION;
    bool pending_is_non_ideal =
        pending_priority.resolution == NON_IDEAL_RESOLUTION;
    bool active_is_non_ideal =
        active_priority.resolution == NON_IDEAL_RESOLUTION;

    // Adjust pending bin state for low res tiles. This prevents
    // pending tree low-res tiles from being initialized before
    // high-res tiles.
    if (pending_is_low_res)
      pending_bin = std::max(pending_bin, EVENTUALLY_BIN);

    // Adjust bin state based on if ready to draw.
    active_bin = kBinReadyToDrawMap[tile_is_ready_to_draw][active_bin];
    pending_bin = kBinReadyToDrawMap[tile_is_ready_to_draw][pending_bin];

    // Adjust bin state based on if active.
    active_bin = kBinIsActiveMap[tile_is_active][active_bin];
    pending_bin = kBinIsActiveMap[tile_is_active][pending_bin];

    // We never want to paint new non-ideal tiles, as we always have
    // a high-res tile covering that content (paint that instead).
    if (!tile_is_ready_to_draw && active_is_non_ideal)
      active_bin = NEVER_BIN;
    if (!tile_is_ready_to_draw && pending_is_non_ideal)
      pending_bin = NEVER_BIN;

    // Compute combined bin.
    ManagedTileBin combined_bin = std::min(active_bin, pending_bin);

    if (!tile_is_ready_to_draw || tile_version.requires_resource()) {
      // The bin that the tile would have if the GPU memory manager had
      // a maximally permissive policy, send to the GPU memory manager
      // to determine policy.
      ManagedTileBin gpu_memmgr_stats_bin = combined_bin;
      if ((gpu_memmgr_stats_bin == NOW_BIN) ||
          (gpu_memmgr_stats_bin == NOW_AND_READY_TO_DRAW_BIN))
        memory_required_bytes_ += BytesConsumedIfAllocated(tile);
      if (gpu_memmgr_stats_bin != NEVER_BIN)
        memory_nice_to_have_bytes_ += BytesConsumedIfAllocated(tile);
    }

    // Apply the memory-limit policy per tree.
    ManagedTileBin tree_bin[NUM_TREES];
    tree_bin[ACTIVE_TREE] = kBinPolicyMap[memory_policy][active_bin];
    tree_bin[PENDING_TREE] = kBinPolicyMap[memory_policy][pending_bin];

    TilePriority tile_priority;
    switch (tree_priority) {
      case SAME_PRIORITY_FOR_BOTH_TREES:
        mts.bin = kBinPolicyMap[memory_policy][combined_bin];
        tile_priority = tile->combined_priority();
        break;
      case SMOOTHNESS_TAKES_PRIORITY:
        mts.bin = tree_bin[ACTIVE_TREE];
        tile_priority = active_priority;
        break;
      case NEW_CONTENT_TAKES_PRIORITY:
        mts.bin = tree_bin[PENDING_TREE];
        tile_priority = pending_priority;
        break;
    }

    // Bump up the priority if we determined it's NEVER_BIN on one tree,
    // but is still required on the other tree.
    bool is_in_never_bin_on_both_trees = tree_bin[ACTIVE_TREE] == NEVER_BIN &&
                                         tree_bin[PENDING_TREE] == NEVER_BIN;

    if (mts.bin == NEVER_BIN && !is_in_never_bin_on_both_trees)
      mts.bin = tile_is_active ? AT_LAST_AND_ACTIVE_BIN : AT_LAST_BIN;

    mts.resolution = tile_priority.resolution;
    mts.priority_bin = tile_priority.priority_bin;
    mts.distance_to_visible = tile_priority.distance_to_visible;
    mts.required_for_activation = tile_priority.required_for_activation;

    mts.visible_and_ready_to_draw =
        tree_bin[ACTIVE_TREE] == NOW_AND_READY_TO_DRAW_BIN;

    // If the tile is in NEVER_BIN and it does not have an active task, then we
    // can release the resources early. If it does have the task however, we
    // should keep it in the prioritized tile set to ensure that AssignGpuMemory
    // can visit it.
    if (mts.bin == NEVER_BIN &&
        !mts.tile_versions[mts.raster_mode].raster_task_ &&
        !tile->required_for_activation()) {
      FreeResourcesForTile(tile);
      continue;
    }

    // Insert the tile into a priority set.
    tiles->InsertTile(tile, mts.bin);
  }
}

// Main entry point: refreshes priorities, assigns GPU memory, and schedules
// raster/decode tasks for the tiles that need them.
void TileManager::ManageTiles(const GlobalStateThatImpactsTilePriority& state) {
  TRACE_EVENT0("cc", "TileManager::ManageTiles");

  // Update internal state.
  if (state != global_state_) {
    global_state_ = state;
    prioritized_tiles_dirty_ = true;
  }

  // We need to call CheckForCompletedTasks() once in-between each call
  // to ScheduleTasks() to prevent canceled tasks from being scheduled.
  if (!did_check_for_completed_tasks_since_last_schedule_tasks_) {
    rasterizer_delegate_->CheckForCompletedTasks();
    did_check_for_completed_tasks_since_last_schedule_tasks_ = true;
  }

  UpdatePrioritizedTileSetIfNeeded();

  TileVector tiles_that_need_to_be_rasterized;
  AssignGpuMemoryToTiles(&prioritized_tiles_,
                         &tiles_that_need_to_be_rasterized);

  // Finally, schedule rasterizer tasks.
  ScheduleTasks(tiles_that_need_to_be_rasterized);

  TRACE_EVENT_INSTANT1("cc",
                       "DidManage",
                       TRACE_EVENT_SCOPE_THREAD,
                       "state",
                       TracedValue::FromValue(BasicStateAsValue().release()));

  TRACE_COUNTER_ID1("cc",
                    "unused_memory_bytes",
                    this,
                    resource_pool_->total_memory_usage_bytes() -
                        resource_pool_->acquired_memory_usage_bytes());
}

// Processes completed tasks and reports (then resets) whether a visible
// tile was initialized since the last call.
bool TileManager::UpdateVisibleTiles() {
  TRACE_EVENT0("cc", "TileManager::UpdateVisibleTiles");

  rasterizer_delegate_->CheckForCompletedTasks();
  did_check_for_completed_tasks_since_last_schedule_tasks_ = true;

  TRACE_EVENT_INSTANT1(
      "cc",
      "DidUpdateVisibleTiles",
      TRACE_EVENT_SCOPE_THREAD,
      "stats",
      TracedValue::FromValue(RasterTaskCompletionStatsAsValue(
          update_visible_tiles_stats_).release()));
  update_visible_tiles_stats_ = RasterTaskCompletionStats();

  bool did_initialize_visible_tile = did_initialize_visible_tile_;
  did_initialize_visible_tile_ = false;
  return did_initialize_visible_tile;
}

void TileManager::GetMemoryStats(size_t* memory_required_bytes,
                                 size_t* memory_nice_to_have_bytes,
                                 size_t* memory_allocated_bytes,
                                 size_t* memory_used_bytes) const {
  *memory_required_bytes = memory_required_bytes_;
  *memory_nice_to_have_bytes = memory_nice_to_have_bytes_;
  *memory_allocated_bytes = resource_pool_->total_memory_usage_bytes();
  *memory_used_bytes = resource_pool_->acquired_memory_usage_bytes();
}

scoped_ptr<base::Value> TileManager::BasicStateAsValue() const {
  scoped_ptr<base::DictionaryValue> state(new base::DictionaryValue());
  state->SetInteger("tile_count", tiles_.size());
  state->Set("global_state", global_state_.AsValue().release());
  state->Set("memory_requirements", GetMemoryRequirementsAsValue().release());
  return state.PassAs<base::Value>();
}

scoped_ptr<base::Value> TileManager::AllTilesAsValue() const {
  scoped_ptr<base::ListValue> state(new base::ListValue());
  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it)
    state->Append(it->second->AsValue().release());

  return state.PassAs<base::Value>();
}

scoped_ptr<base::Value> TileManager::GetMemoryRequirementsAsValue() const {
  scoped_ptr<base::DictionaryValue> requirements(new base::DictionaryValue());

  size_t memory_required_bytes;
  size_t memory_nice_to_have_bytes;
  size_t memory_allocated_bytes;
  size_t memory_used_bytes;
  GetMemoryStats(&memory_required_bytes,
                 &memory_nice_to_have_bytes,
                 &memory_allocated_bytes,
                 &memory_used_bytes);
  requirements->SetInteger("memory_required_bytes", memory_required_bytes);
  requirements->SetInteger("memory_nice_to_have_bytes",
                           memory_nice_to_have_bytes);
  requirements->SetInteger("memory_allocated_bytes", memory_allocated_bytes);
  requirements->SetInteger("memory_used_bytes", memory_used_bytes);
  return requirements.PassAs<base::Value>();
}

// Walks |tiles| in priority order handing out the soft/hard memory budgets,
// freeing tiles that don't fit and collecting the tiles that should be
// (re)rasterized into |tiles_that_need_to_be_rasterized|.
void TileManager::AssignGpuMemoryToTiles(
    PrioritizedTileSet* tiles,
    TileVector* tiles_that_need_to_be_rasterized) {
  TRACE_EVENT0("cc", "TileManager::AssignGpuMemoryToTiles");

  // Maintain the list of released resources that can potentially be re-used
  // or deleted.
  // If this operation becomes expensive too, only do this after some
  // resource(s) was returned. Note that in that case, one also need to
  // invalidate when releasing some resource from the pool.
  resource_pool_->CheckBusyResources();

  // Now give memory out to the tiles until we're out, and build
  // the needs-to-be-rasterized queue.
  all_tiles_that_need_to_be_rasterized_have_memory_ = true;
  all_tiles_required_for_activation_have_memory_ = true;

  // Cast to prevent overflow.
  int64 soft_bytes_available =
      static_cast<int64>(bytes_releasable_) +
      static_cast<int64>(global_state_.soft_memory_limit_in_bytes) -
      static_cast<int64>(resource_pool_->acquired_memory_usage_bytes());
  int64 hard_bytes_available =
      static_cast<int64>(bytes_releasable_) +
      static_cast<int64>(global_state_.hard_memory_limit_in_bytes) -
      static_cast<int64>(resource_pool_->acquired_memory_usage_bytes());
  int resources_available = resources_releasable_ +
                            global_state_.num_resources_limit -
                            resource_pool_->acquired_resource_count();
  size_t soft_bytes_allocatable =
      std::max(static_cast<int64>(0), soft_bytes_available);
  size_t hard_bytes_allocatable =
      std::max(static_cast<int64>(0), hard_bytes_available);
  size_t resources_allocatable = std::max(0, resources_available);

  size_t bytes_that_exceeded_memory_budget = 0;
  size_t soft_bytes_left = soft_bytes_allocatable;
  size_t hard_bytes_left = hard_bytes_allocatable;

  size_t resources_left = resources_allocatable;
  bool oomed_soft = false;
  bool oomed_hard = false;
  bool have_hit_soft_memory = false;  // Soft memory comes after hard.

  // Memory we assign to raster tasks now will be deducted from our memory
  // in future iterations if priorities change. By assigning at most half
  // the raster limit, we will always have another 50% left even if priorities
  // change completely (assuming we check for completed/cancelled rasters
  // between each call to this function).
  size_t max_raster_bytes = max_raster_usage_bytes_ / 2;
  size_t raster_bytes = 0;

  int processed_required_for_activation_tile_count = 0;
  unsigned schedule_priority = 1u;
  for (PrioritizedTileSet::Iterator it(tiles, true); it; ++it) {
    Tile* tile = *it;
    ManagedTileState& mts = tile->managed_state();

    mts.scheduled_priority = schedule_priority++;

    mts.raster_mode = tile->DetermineOverallRasterMode();

    ManagedTileState::TileVersion& tile_version =
        mts.tile_versions[mts.raster_mode];

    // If this tile doesn't need a resource, then nothing to do.
    if (!tile_version.requires_resource())
      continue;

    // If the tile is not needed, free it up.
    if (mts.bin == NEVER_BIN) {
      FreeResourcesForTile(tile);
      if (tile->required_for_activation())
        ++processed_required_for_activation_tile_count;
      continue;
    }

    const bool tile_uses_hard_limit = mts.bin <= NOW_BIN;
    const size_t bytes_if_allocated = BytesConsumedIfAllocated(tile);
    const size_t raster_bytes_if_rastered = raster_bytes + bytes_if_allocated;
    const size_t tile_bytes_left =
        (tile_uses_hard_limit) ? hard_bytes_left : soft_bytes_left;

    // Hard-limit is reserved for tiles that would cause a calamity
    // if they were to go away, so by definition they are the highest
    // priority memory, and must be at the front of the list.
    DCHECK(!(have_hit_soft_memory && tile_uses_hard_limit));
    have_hit_soft_memory |= !tile_uses_hard_limit;

    size_t tile_bytes = 0;
    size_t tile_resources = 0;

    // It costs to maintain a resource.
    for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
      if (mts.tile_versions[mode].resource_) {
        tile_bytes += bytes_if_allocated;
        tile_resources++;
      }
    }

    // Allow lower priority tiles with initialized resources to keep
    // their memory by only assigning memory to new raster tasks if
    // they can be scheduled.
    if (raster_bytes_if_rastered <= max_raster_bytes) {
      // If we don't have the required version, and it's not in flight
      // then we'll have to pay to create a new task.
      if (!tile_version.resource_ && !tile_version.raster_task_) {
        tile_bytes += bytes_if_allocated;
        tile_resources++;
      }
    }

    // Tile is OOM.
    if (tile_bytes > tile_bytes_left || tile_resources > resources_left) {
      FreeResourcesForTile(tile);

      // This tile was already on screen and now its resources have been
      // released. In order to prevent checkerboarding, set this tile as
      // rasterize on demand immediately.
      if (mts.visible_and_ready_to_draw && use_rasterize_on_demand_)
        tile_version.set_rasterize_on_demand();

      oomed_soft = true;
      if (tile_uses_hard_limit) {
        oomed_hard = true;
        bytes_that_exceeded_memory_budget += tile_bytes;
      }
    } else {
      resources_left -= tile_resources;
      hard_bytes_left -= tile_bytes;
      soft_bytes_left =
          (soft_bytes_left > tile_bytes) ? soft_bytes_left - tile_bytes : 0;
      if (tile_version.resource_) {
        DCHECK(tile->IsReadyToDraw());
        continue;
      }
    }

    DCHECK(!tile_version.resource_);

    // Tile shouldn't be rasterized if |tiles_that_need_to_be_rasterized|
    // has reached its limit or we've failed to assign gpu memory to this
    // or any higher priority tile. Preventing tiles that fit into memory
    // budget to be rasterized when higher priority tile is oom is
    // important for two reasons:
    // 1. Tile size should not impact raster priority.
    // 2. Tiles with existing raster task could otherwise incorrectly
    //    be added as they are not affected by |bytes_allocatable|.
    bool can_schedule_tile =
        !oomed_soft && raster_bytes_if_rastered <= max_raster_bytes &&
        tiles_that_need_to_be_rasterized->size() < kScheduledRasterTasksLimit;

    if (!can_schedule_tile) {
      all_tiles_that_need_to_be_rasterized_have_memory_ = false;
      it.DisablePriorityOrdering();
      continue;
    }

    raster_bytes = raster_bytes_if_rastered;
    tiles_that_need_to_be_rasterized->push_back(tile);
    if (tile->required_for_activation())
      ++processed_required_for_activation_tile_count;
  }

  int total_required_for_activation_tile_count = 0;
  for (std::vector<PictureLayerImpl*>::const_iterator it = layers_.begin();
       it != layers_.end();
       ++it) {
    if ((*it)->GetTree() == PENDING_TREE) {
      total_required_for_activation_tile_count +=
          (*it)->UninitializedTilesRequiredForActivationCount();
    }
  }

  all_tiles_required_for_activation_have_memory_ =
      processed_required_for_activation_tile_count ==
      total_required_for_activation_tile_count;

  // OOM reporting uses hard-limit, soft-OOM is normal depending on limit.
  ever_exceeded_memory_budget_ |= oomed_hard;
  if (ever_exceeded_memory_budget_) {
    TRACE_COUNTER_ID2("cc",
                      "over_memory_budget",
                      this,
                      "budget",
                      global_state_.hard_memory_limit_in_bytes,
                      "over",
                      bytes_that_exceeded_memory_budget);
  }
  // Snapshot this assignment's accounting so it can be reported later.
  memory_stats_from_last_assign_.total_budget_in_bytes =
      global_state_.hard_memory_limit_in_bytes;
  memory_stats_from_last_assign_.bytes_allocated =
      hard_bytes_allocatable - hard_bytes_left;
  memory_stats_from_last_assign_.bytes_unreleasable =
      hard_bytes_allocatable - bytes_releasable_;
  memory_stats_from_last_assign_.bytes_over = bytes_that_exceeded_memory_budget;
}

// Releases the resource backing |tile|'s version for |mode|, if any, and
// updates the releasable bytes/resources bookkeeping to match.
void TileManager::FreeResourceForTile(Tile* tile, RasterMode mode) {
  ManagedTileState& mts = tile->managed_state();
  if (mts.tile_versions[mode].resource_) {
    resource_pool_->ReleaseResource(mts.tile_versions[mode].resource_.Pass());

    DCHECK_GE(bytes_releasable_, BytesConsumedIfAllocated(tile));
    DCHECK_GE(resources_releasable_, 1u);

    bytes_releasable_ -= BytesConsumedIfAllocated(tile);
    --resources_releasable_;
  }
}

// Releases the resources of every raster-mode version of |tile|.
void TileManager::FreeResourcesForTile(Tile* tile) {
  for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
    FreeResourceForTile(tile, static_cast<RasterMode>(mode));
  }
}

// Keeps the resource of the first ready-to-draw version of |tile| found and
// frees the resources of all other versions.
void TileManager::FreeUnusedResourcesForTile(Tile* tile) {
  DCHECK(tile->IsReadyToDraw());
  ManagedTileState& mts = tile->managed_state();
  RasterMode used_mode = HIGH_QUALITY_NO_LCD_RASTER_MODE;
  for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
    if (mts.tile_versions[mode].IsReadyToDraw()) {
      used_mode = static_cast<RasterMode>(mode);
      break;
    }
  }

  for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
    if (mode != used_mode)
      FreeResourceForTile(tile, static_cast<RasterMode>(mode));
  }
}

// Builds per-rasterizer task queues for |tiles_that_need_to_be_rasterized|
// (highest priority first) and hands them to the rasterizer delegate.
void TileManager::ScheduleTasks(
    const TileVector& tiles_that_need_to_be_rasterized) {
  TRACE_EVENT1("cc",
               "TileManager::ScheduleTasks",
               "count",
               tiles_that_need_to_be_rasterized.size());

  DCHECK(did_check_for_completed_tasks_since_last_schedule_tasks_);

  // Start from empty queues; they are rebuilt from scratch on every call.
  for (size_t i = 0; i < NUM_RASTERIZER_TYPES; ++i)
    raster_queue_[i].Reset();

  // Build a new task queue containing all tasks currently needed. Tasks
  // are added in order of priority, highest priority task first.
  for (TileVector::const_iterator it = tiles_that_need_to_be_rasterized.begin();
       it != tiles_that_need_to_be_rasterized.end();
       ++it) {
    Tile* tile = *it;
    ManagedTileState& mts = tile->managed_state();
    ManagedTileState::TileVersion& tile_version =
        mts.tile_versions[mts.raster_mode];

    DCHECK(tile_version.requires_resource());
    DCHECK(!tile_version.resource_);

    // Reuse an in-flight raster task if the tile already has one.
    if (!tile_version.raster_task_)
      tile_version.raster_task_ = CreateRasterTask(tile);

    // GPU-rasterized tiles go to a separate queue/rasterizer.
    size_t pool_type = tile->use_gpu_rasterization() ? RASTERIZER_TYPE_GPU
                                                     : RASTERIZER_TYPE_DEFAULT;

    raster_queue_[pool_type].items.push_back(RasterTaskQueue::Item(
        tile_version.raster_task_.get(), tile->required_for_activation()));
    raster_queue_[pool_type].required_for_activation_count +=
        tile->required_for_activation();
  }

  // We must reduce the amount of unused resources before calling
  // ScheduleTasks to prevent usage from rising above limits.
  resource_pool_->ReduceResourceUsage();

  // Schedule running of |raster_queue_|. This replaces any previously
  // scheduled tasks and effectively cancels all tasks not present
  // in |raster_queue_|.
  rasterizer_delegate_->ScheduleTasks(raster_queue_);

  // It's now safe to clean up orphan tasks as raster worker pool is not
  // allowed to keep around unreferenced raster tasks after ScheduleTasks() has
  // been called.
  orphan_raster_tasks_.clear();

  did_check_for_completed_tasks_since_last_schedule_tasks_ = false;
}

// Creates a decode task for |pixel_ref| whose completion is reported back to
// OnImageDecodeTaskCompleted() for |tile|'s layer.
scoped_refptr<ImageDecodeTask> TileManager::CreateImageDecodeTask(
    Tile* tile,
    SkPixelRef* pixel_ref) {
  return make_scoped_refptr(new ImageDecodeTaskImpl(
      pixel_ref,
      tile->layer_id(),
      rendering_stats_instrumentation_,
      base::Bind(&TileManager::OnImageDecodeTaskCompleted,
                 base::Unretained(this),
                 tile->layer_id(),
                 base::Unretained(pixel_ref))));
}

// Creates a raster task for |tile|, acquiring a resource from the pool and
// collecting the image decode tasks the raster depends on.
scoped_refptr<RasterTask> TileManager::CreateRasterTask(Tile* tile) {
  ManagedTileState& mts = tile->managed_state();

  scoped_ptr<ScopedResource> resource =
      resource_pool_->AcquireResource(tile->tile_size_.size());
  const ScopedResource* const_resource = resource.get();

  // Create and queue all image decode tasks that this tile depends on.
  ImageDecodeTask::Vector decode_tasks;
  PixelRefTaskMap& existing_pixel_refs = image_decode_tasks_[tile->layer_id()];
  for (PicturePileImpl::PixelRefIterator iter(
           tile->content_rect(), tile->contents_scale(), tile->picture_pile());
       iter;
       ++iter) {
    SkPixelRef* pixel_ref = *iter;
    uint32_t id = pixel_ref->getGenerationID();

    // Append existing image decode task if available.
    PixelRefTaskMap::iterator decode_task_it = existing_pixel_refs.find(id);
    if (decode_task_it != existing_pixel_refs.end()) {
      decode_tasks.push_back(decode_task_it->second);
      continue;
    }

    // Create and append new image decode task for this pixel ref.
    scoped_refptr<ImageDecodeTask> decode_task =
        CreateImageDecodeTask(tile, pixel_ref);
    decode_tasks.push_back(decode_task);
    existing_pixel_refs[id] = decode_task;
  }

  // We analyze picture before rasterization to detect solid-color tiles.
  // If the tile is detected as such there is no need to raster or upload.
  // It is drawn directly as a solid-color quad saving raster and upload cost.
  // The analysis step is however expensive and is not justified when doing
  // gpu rasterization where there is no upload.
  //
  // Additionally, we do not want to do the analysis if the layer that produced
  // this tile is narrow, since more likely than not the tile would not be
  // solid. We use the picture pile size as a proxy for layer size, since it
  // represents the recorded (and thus rasterizable) content.
  // Note that this last optimization is a heuristic that ensures that we don't
  // spend too much time analyzing tiles on a multitude of small layers, as it
  // is likely that these layers have some non-solid content.
  gfx::Size pile_size = tile->picture_pile()->tiling_rect().size();
  bool analyze_picture = !tile->use_gpu_rasterization() &&
                         std::min(pile_size.width(), pile_size.height()) >=
                             kMinDimensionsForAnalysis;

  return make_scoped_refptr(
      new RasterTaskImpl(const_resource,
                         tile->picture_pile(),
                         tile->content_rect(),
                         tile->contents_scale(),
                         mts.raster_mode,
                         mts.resolution,
                         tile->layer_id(),
                         static_cast<const void*>(tile),
                         tile->source_frame_number(),
                         analyze_picture,
                         rendering_stats_instrumentation_,
                         base::Bind(&TileManager::OnRasterTaskCompleted,
                                    base::Unretained(this),
                                    tile->id(),
                                    base::Passed(&resource),
                                    mts.raster_mode),
                         &decode_tasks));
}

void TileManager::OnImageDecodeTaskCompleted(int layer_id,
                                             SkPixelRef* pixel_ref,
                                             bool was_canceled) {
  // If the task was canceled, we need to clean it up
  // from |image_decode_tasks_|; completed tasks stay cached for reuse.
  if (!was_canceled)
    return;

  LayerPixelRefTaskMap::iterator layer_it = image_decode_tasks_.find(layer_id);
  if (layer_it == image_decode_tasks_.end())
    return;

  PixelRefTaskMap& pixel_ref_tasks = layer_it->second;
  PixelRefTaskMap::iterator task_it =
      pixel_ref_tasks.find(pixel_ref->getGenerationID());

  if (task_it != pixel_ref_tasks.end())
    pixel_ref_tasks.erase(task_it);
}

// Completion callback for a raster task. Transfers ownership of |resource| to
// the tile's version on success, or returns it to the pool when the task was
// canceled, the tile is gone, or analysis found a solid-color tile.
void TileManager::OnRasterTaskCompleted(
    Tile::Id tile_id,
    scoped_ptr<ScopedResource> resource,
    RasterMode raster_mode,
    const PicturePileImpl::Analysis& analysis,
    bool was_canceled) {
  TileMap::iterator it = tiles_.find(tile_id);
  if (it == tiles_.end()) {
    // The tile was destroyed while the task was in flight.
    ++update_visible_tiles_stats_.canceled_count;
    resource_pool_->ReleaseResource(resource.Pass());
    return;
  }

  Tile* tile = it->second;
  ManagedTileState& mts = tile->managed_state();
  ManagedTileState::TileVersion& tile_version = mts.tile_versions[raster_mode];
  DCHECK(tile_version.raster_task_);
  // Keep the finished task alive until the next ScheduleTasks() call, which
  // clears |orphan_raster_tasks_|.
  orphan_raster_tasks_.push_back(tile_version.raster_task_);
  tile_version.raster_task_ = NULL;

  if (was_canceled) {
    ++update_visible_tiles_stats_.canceled_count;
    resource_pool_->ReleaseResource(resource.Pass());
    return;
  }

  ++update_visible_tiles_stats_.completed_count;

  tile_version.set_has_text(analysis.has_text);
  if (analysis.is_solid_color) {
    // Solid-color tiles are drawn as solid quads; no resource is needed.
    tile_version.set_solid_color(analysis.solid_color);
    resource_pool_->ReleaseResource(resource.Pass());
  } else {
    tile_version.set_use_resource();
    tile_version.resource_ = resource.Pass();

    bytes_releasable_ += BytesConsumedIfAllocated(tile);
    ++resources_releasable_;
  }

  client_->NotifyTileInitialized(tile);

  // Only the winning version's resource needs to stay alive.
  FreeUnusedResourcesForTile(tile);
  if (tile->priority(ACTIVE_TREE).distance_to_visible == 0.f)
    did_initialize_visible_tile_ = true;
}

// Creates a tile, registers it in |tiles_| and marks the prioritized tile
// set dirty so priorities are rebuilt before the next use.
scoped_refptr<Tile> TileManager::CreateTile(PicturePileImpl* picture_pile,
                                            const gfx::Size& tile_size,
                                            const gfx::Rect& content_rect,
                                            const gfx::Rect& opaque_rect,
                                            float contents_scale,
                                            int layer_id,
                                            int source_frame_number,
                                            int flags) {
  scoped_refptr<Tile> tile = make_scoped_refptr(new Tile(this,
                                                         picture_pile,
                                                         tile_size,
                                                         content_rect,
                                                         opaque_rect,
                                                         contents_scale,
                                                         layer_id,
                                                         source_frame_number,
                                                         flags));
  DCHECK(tiles_.find(tile->id()) == tiles_.end());

  tiles_[tile->id()] = tile;
  used_layer_counts_[tile->layer_id()]++;
  prioritized_tiles_dirty_ = true;
  return tile;
}

void TileManager::RegisterPictureLayerImpl(PictureLayerImpl* layer) {
  DCHECK(std::find(layers_.begin(), layers_.end(), layer) == layers_.end());
  layers_.push_back(layer);
}

void TileManager::UnregisterPictureLayerImpl(PictureLayerImpl* layer) {
  std::vector<PictureLayerImpl*>::iterator it =
      std::find(layers_.begin(), layers_.end(), layer);
  DCHECK(it != layers_.end());
  layers_.erase(it);
}

// Pairs each registered layer with its active/pending twin (if any) so that
// tile iteration can consider both trees together.
void TileManager::GetPairedPictureLayers(
    std::vector<PairedPictureLayer>* paired_layers) const {
  paired_layers->clear();
  // Reserve a maximum possible paired layers.
  paired_layers->reserve(layers_.size());

  for (std::vector<PictureLayerImpl*>::const_iterator it = layers_.begin();
       it != layers_.end();
       ++it) {
    PictureLayerImpl* layer = *it;

    // This is a recycle tree layer, we can safely skip since the tiles on this
    // layer have to be accessible via the active tree.
    if (!layer->IsOnActiveOrPendingTree())
      continue;

    PictureLayerImpl* twin_layer = layer->GetTwinLayer();

    // If the twin layer is recycled, it is not a valid twin.
    if (twin_layer && !twin_layer->IsOnActiveOrPendingTree())
      twin_layer = NULL;

    PairedPictureLayer paired_layer;
    WhichTree tree = layer->GetTree();

    // If the current tree is ACTIVE_TREE, then always generate a paired_layer.
    // If current tree is PENDING_TREE, then only generate a paired_layer if
    // there is no twin layer (otherwise the twin's iteration covers it).
    if (tree == ACTIVE_TREE) {
      DCHECK(!twin_layer || twin_layer->GetTree() == PENDING_TREE);
      paired_layer.active_layer = layer;
      paired_layer.pending_layer = twin_layer;
      paired_layers->push_back(paired_layer);
    } else if (!twin_layer) {
      paired_layer.active_layer = NULL;
      paired_layer.pending_layer = layer;
      paired_layers->push_back(paired_layer);
    }
  }
}

TileManager::PairedPictureLayer::PairedPictureLayer()
    : active_layer(NULL), pending_layer(NULL) {}

TileManager::PairedPictureLayer::~PairedPictureLayer() {}

// Iterates tiles of all registered layers in raster order by merging the
// per-layer-pair iterators through a heap ordered by RasterOrderComparator.
TileManager::RasterTileIterator::RasterTileIterator(TileManager* tile_manager,
                                                    TreePriority tree_priority)
    : tree_priority_(tree_priority), comparator_(tree_priority) {
  std::vector<TileManager::PairedPictureLayer> paired_layers;
  tile_manager->GetPairedPictureLayers(&paired_layers);
  bool prioritize_low_res = tree_priority_ == SMOOTHNESS_TAKES_PRIORITY;

  paired_iterators_.reserve(paired_layers.size());
  iterator_heap_.reserve(paired_layers.size());
  for (std::vector<TileManager::PairedPictureLayer>::iterator it =
           paired_layers.begin();
       it != paired_layers.end();
       ++it) {
    PairedPictureLayerIterator paired_iterator;
    if (it->active_layer) {
      paired_iterator.active_iterator =
          PictureLayerImpl::LayerRasterTileIterator(it->active_layer,
                                                    prioritize_low_res);
    }

    if (it->pending_layer) {
      paired_iterator.pending_iterator =
          PictureLayerImpl::LayerRasterTileIterator(it->pending_layer,
                                                    prioritize_low_res);
    }

    // Only pairs that actually have a tile to offer go on the heap.
    if (paired_iterator.PeekTile(tree_priority_) != NULL) {
      paired_iterators_.push_back(paired_iterator);
      iterator_heap_.push_back(&paired_iterators_.back());
    }
  }

  std::make_heap(iterator_heap_.begin(), iterator_heap_.end(), comparator_);
}

TileManager::RasterTileIterator::~RasterTileIterator() {}

TileManager::RasterTileIterator& TileManager::RasterTileIterator::operator++() {
  DCHECK(*this);

  // Pop the best pair, advance it, and re-insert it if it has more tiles.
  std::pop_heap(iterator_heap_.begin(), iterator_heap_.end(), comparator_);
  PairedPictureLayerIterator* paired_iterator = iterator_heap_.back();
  iterator_heap_.pop_back();

  paired_iterator->PopTile(tree_priority_);
  if (paired_iterator->PeekTile(tree_priority_) != NULL) {
    iterator_heap_.push_back(paired_iterator);
    std::push_heap(iterator_heap_.begin(), iterator_heap_.end(), comparator_);
  }
  return *this;
}

TileManager::RasterTileIterator::operator bool() const {
  return !iterator_heap_.empty();
}

Tile* TileManager::RasterTileIterator::operator*() {
  DCHECK(*this);
  return iterator_heap_.front()->PeekTile(tree_priority_);
}

TileManager::RasterTileIterator::PairedPictureLayerIterator::
    PairedPictureLayerIterator() {}

TileManager::RasterTileIterator::PairedPictureLayerIterator::
    ~PairedPictureLayerIterator() {}

// Returns the pair's next tile without consuming it, or NULL when both
// per-tree iterators are exhausted.
Tile* TileManager::RasterTileIterator::PairedPictureLayerIterator::PeekTile(
    TreePriority tree_priority) {
  PictureLayerImpl::LayerRasterTileIterator* next_iterator =
      NextTileIterator(tree_priority).first;
  if (!next_iterator)
    return NULL;

  DCHECK(*next_iterator);
  // A tile shared between the trees must not be returned twice.
  DCHECK(std::find(returned_shared_tiles.begin(),
                   returned_shared_tiles.end(),
                   **next_iterator) == returned_shared_tiles.end());
  return **next_iterator;
}

// Consumes the pair's next tile and advances past any tiles that were
// already returned through the twin tree's iterator.
void TileManager::RasterTileIterator::PairedPictureLayerIterator::PopTile(
    TreePriority tree_priority) {
PictureLayerImpl::LayerRasterTileIterator* next_iterator = 1396 NextTileIterator(tree_priority).first; 1397 DCHECK(next_iterator); 1398 DCHECK(*next_iterator); 1399 returned_shared_tiles.push_back(**next_iterator); 1400 ++(*next_iterator); 1401 1402 next_iterator = NextTileIterator(tree_priority).first; 1403 while (next_iterator && 1404 std::find(returned_shared_tiles.begin(), 1405 returned_shared_tiles.end(), 1406 **next_iterator) != returned_shared_tiles.end()) { 1407 ++(*next_iterator); 1408 next_iterator = NextTileIterator(tree_priority).first; 1409 } 1410} 1411 1412std::pair<PictureLayerImpl::LayerRasterTileIterator*, WhichTree> 1413TileManager::RasterTileIterator::PairedPictureLayerIterator::NextTileIterator( 1414 TreePriority tree_priority) { 1415 // If both iterators are out of tiles, return NULL. 1416 if (!active_iterator && !pending_iterator) { 1417 return std::pair<PictureLayerImpl::LayerRasterTileIterator*, WhichTree>( 1418 NULL, ACTIVE_TREE); 1419 } 1420 1421 // If we only have one iterator with tiles, return it. 1422 if (!active_iterator) 1423 return std::make_pair(&pending_iterator, PENDING_TREE); 1424 if (!pending_iterator) 1425 return std::make_pair(&active_iterator, ACTIVE_TREE); 1426 1427 // Now both iterators have tiles, so we have to decide based on tree priority. 
1428 switch (tree_priority) { 1429 case SMOOTHNESS_TAKES_PRIORITY: 1430 return std::make_pair(&active_iterator, ACTIVE_TREE); 1431 case NEW_CONTENT_TAKES_PRIORITY: 1432 return std::make_pair(&pending_iterator, ACTIVE_TREE); 1433 case SAME_PRIORITY_FOR_BOTH_TREES: { 1434 Tile* active_tile = *active_iterator; 1435 Tile* pending_tile = *pending_iterator; 1436 if (active_tile == pending_tile) 1437 return std::make_pair(&active_iterator, ACTIVE_TREE); 1438 1439 const TilePriority& active_priority = active_tile->priority(ACTIVE_TREE); 1440 const TilePriority& pending_priority = 1441 pending_tile->priority(PENDING_TREE); 1442 1443 if (active_priority.IsHigherPriorityThan(pending_priority)) 1444 return std::make_pair(&active_iterator, ACTIVE_TREE); 1445 return std::make_pair(&pending_iterator, PENDING_TREE); 1446 } 1447 } 1448 1449 NOTREACHED(); 1450 // Keep the compiler happy. 1451 return std::pair<PictureLayerImpl::LayerRasterTileIterator*, WhichTree>( 1452 NULL, ACTIVE_TREE); 1453} 1454 1455TileManager::RasterTileIterator::RasterOrderComparator::RasterOrderComparator( 1456 TreePriority tree_priority) 1457 : tree_priority_(tree_priority) {} 1458 1459bool TileManager::RasterTileIterator::RasterOrderComparator::operator()( 1460 PairedPictureLayerIterator* a, 1461 PairedPictureLayerIterator* b) const { 1462 std::pair<PictureLayerImpl::LayerRasterTileIterator*, WhichTree> a_pair = 1463 a->NextTileIterator(tree_priority_); 1464 DCHECK(a_pair.first); 1465 DCHECK(*a_pair.first); 1466 1467 std::pair<PictureLayerImpl::LayerRasterTileIterator*, WhichTree> b_pair = 1468 b->NextTileIterator(tree_priority_); 1469 DCHECK(b_pair.first); 1470 DCHECK(*b_pair.first); 1471 1472 Tile* a_tile = **a_pair.first; 1473 Tile* b_tile = **b_pair.first; 1474 1475 const TilePriority& a_priority = 1476 a_tile->priority_for_tree_priority(tree_priority_); 1477 const TilePriority& b_priority = 1478 b_tile->priority_for_tree_priority(tree_priority_); 1479 bool prioritize_low_res = tree_priority_ == 
      SMOOTHNESS_TAKES_PRIORITY;

  // Resolution trumps priority: during smooth scrolling low-res tiles come
  // first, otherwise high-res tiles do; non-ideal resolution loses either way.
  if (b_priority.resolution != a_priority.resolution) {
    return (prioritize_low_res && b_priority.resolution == LOW_RESOLUTION) ||
           (!prioritize_low_res && b_priority.resolution == HIGH_RESOLUTION) ||
           (a_priority.resolution == NON_IDEAL_RESOLUTION);
  }

  return b_priority.IsHigherPriorityThan(a_priority);
}

TileManager::EvictionTileIterator::EvictionTileIterator()
    : comparator_(SAME_PRIORITY_FOR_BOTH_TREES) {}

// Iterates tiles of all registered layers in eviction order by merging the
// per-layer-pair iterators through a heap ordered by EvictionOrderComparator.
TileManager::EvictionTileIterator::EvictionTileIterator(
    TileManager* tile_manager,
    TreePriority tree_priority)
    : tree_priority_(tree_priority), comparator_(tree_priority) {
  std::vector<TileManager::PairedPictureLayer> paired_layers;

  tile_manager->GetPairedPictureLayers(&paired_layers);

  paired_iterators_.reserve(paired_layers.size());
  iterator_heap_.reserve(paired_layers.size());
  for (std::vector<TileManager::PairedPictureLayer>::iterator it =
           paired_layers.begin();
       it != paired_layers.end();
       ++it) {
    PairedPictureLayerIterator paired_iterator;
    if (it->active_layer) {
      paired_iterator.active_iterator =
          PictureLayerImpl::LayerEvictionTileIterator(it->active_layer,
                                                      tree_priority_);
    }

    if (it->pending_layer) {
      paired_iterator.pending_iterator =
          PictureLayerImpl::LayerEvictionTileIterator(it->pending_layer,
                                                      tree_priority_);
    }

    // Only pairs that actually have a tile to offer go on the heap.
    if (paired_iterator.PeekTile(tree_priority_) != NULL) {
      paired_iterators_.push_back(paired_iterator);
      iterator_heap_.push_back(&paired_iterators_.back());
    }
  }

  std::make_heap(iterator_heap_.begin(), iterator_heap_.end(), comparator_);
}

TileManager::EvictionTileIterator::~EvictionTileIterator() {}

TileManager::EvictionTileIterator& TileManager::EvictionTileIterator::
operator++() {
  // Pop the best pair, advance it, and re-insert it if it has more tiles.
  std::pop_heap(iterator_heap_.begin(), iterator_heap_.end(), comparator_);
  PairedPictureLayerIterator* paired_iterator = iterator_heap_.back();
  iterator_heap_.pop_back();

  paired_iterator->PopTile(tree_priority_);
  if (paired_iterator->PeekTile(tree_priority_) != NULL) {
    iterator_heap_.push_back(paired_iterator);
    std::push_heap(iterator_heap_.begin(), iterator_heap_.end(), comparator_);
  }
  return *this;
}

TileManager::EvictionTileIterator::operator bool() const {
  return !iterator_heap_.empty();
}

Tile* TileManager::EvictionTileIterator::operator*() {
  DCHECK(*this);
  return iterator_heap_.front()->PeekTile(tree_priority_);
}

TileManager::EvictionTileIterator::PairedPictureLayerIterator::
    PairedPictureLayerIterator() {}

TileManager::EvictionTileIterator::PairedPictureLayerIterator::
    ~PairedPictureLayerIterator() {}

// Returns the pair's next tile without consuming it, or NULL when both
// per-tree iterators are exhausted.
Tile* TileManager::EvictionTileIterator::PairedPictureLayerIterator::PeekTile(
    TreePriority tree_priority) {
  PictureLayerImpl::LayerEvictionTileIterator* next_iterator =
      NextTileIterator(tree_priority);
  if (!next_iterator)
    return NULL;

  DCHECK(*next_iterator);
  // A tile shared between the trees must not be returned twice.
  DCHECK(std::find(returned_shared_tiles.begin(),
                   returned_shared_tiles.end(),
                   **next_iterator) == returned_shared_tiles.end());
  return **next_iterator;
}

// Consumes the pair's next tile and advances past any tiles that were
// already returned through the twin tree's iterator.
void TileManager::EvictionTileIterator::PairedPictureLayerIterator::PopTile(
    TreePriority tree_priority) {
  PictureLayerImpl::LayerEvictionTileIterator* next_iterator =
      NextTileIterator(tree_priority);
  DCHECK(next_iterator);
  DCHECK(*next_iterator);
  returned_shared_tiles.push_back(**next_iterator);
  ++(*next_iterator);

  next_iterator = NextTileIterator(tree_priority);
  while (next_iterator &&
         std::find(returned_shared_tiles.begin(),
                   returned_shared_tiles.end(),
                   **next_iterator) != returned_shared_tiles.end()) {
    ++(*next_iterator);
    next_iterator =
        NextTileIterator(tree_priority);
  }
}

// Picks which per-tree iterator should supply the next tile to evict.
// Returns NULL when both iterators are exhausted.
PictureLayerImpl::LayerEvictionTileIterator*
TileManager::EvictionTileIterator::PairedPictureLayerIterator::NextTileIterator(
    TreePriority tree_priority) {
  // If both iterators are out of tiles, return NULL.
  if (!active_iterator && !pending_iterator)
    return NULL;

  // If we only have one iterator with tiles, return it.
  if (!active_iterator)
    return &pending_iterator;
  if (!pending_iterator)
    return &active_iterator;

  // A shared tile can be returned from either side; prefer active.
  Tile* active_tile = *active_iterator;
  Tile* pending_tile = *pending_iterator;
  if (active_tile == pending_tile)
    return &active_iterator;

  const TilePriority& active_priority =
      active_tile->priority_for_tree_priority(tree_priority);
  const TilePriority& pending_priority =
      pending_tile->priority_for_tree_priority(tree_priority);

  // Evict from the lower-priority side first.
  if (pending_priority.IsHigherPriorityThan(active_priority))
    return &active_iterator;
  return &pending_iterator;
}

TileManager::EvictionTileIterator::EvictionOrderComparator::
    EvictionOrderComparator(TreePriority tree_priority)
    : tree_priority_(tree_priority) {}

// Heap comparator: returns true when |b|'s next tile should be evicted
// before |a|'s. Note the resolution preference and the final comparison are
// deliberately reversed relative to RasterOrderComparator, since eviction
// proceeds from least- to most-important tiles.
bool TileManager::EvictionTileIterator::EvictionOrderComparator::operator()(
    PairedPictureLayerIterator* a,
    PairedPictureLayerIterator* b) const {
  PictureLayerImpl::LayerEvictionTileIterator* a_iterator =
      a->NextTileIterator(tree_priority_);
  DCHECK(a_iterator);
  DCHECK(*a_iterator);

  PictureLayerImpl::LayerEvictionTileIterator* b_iterator =
      b->NextTileIterator(tree_priority_);
  DCHECK(b_iterator);
  DCHECK(*b_iterator);

  Tile* a_tile = **a_iterator;
  Tile* b_tile = **b_iterator;

  const TilePriority& a_priority =
      a_tile->priority_for_tree_priority(tree_priority_);
  const TilePriority& b_priority =
      b_tile->priority_for_tree_priority(tree_priority_);
  bool prioritize_low_res =
      tree_priority_ != SMOOTHNESS_TAKES_PRIORITY;

  if (b_priority.resolution != a_priority.resolution) {
    return (prioritize_low_res && b_priority.resolution == LOW_RESOLUTION) ||
           (!prioritize_low_res && b_priority.resolution == HIGH_RESOLUTION) ||
           (a_priority.resolution == NON_IDEAL_RESOLUTION);
  }
  return a_priority.IsHigherPriorityThan(b_priority);
}

}  // namespace cc