// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#include <algorithm>
#include <limits>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_manager_client.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_memory_uma_stats.h"
#include "content/common/gpu/gpu_messages.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/gpu_switches.h"

using gpu::ManagedMemoryStats;
using gpu::MemoryAllocation;

namespace content {
namespace {

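// Delay, in milliseconds, before a coalesced Manage() scheduled with
// kScheduleManageLater runs (roughly four frames at 60 Hz).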
const int kDelayedScheduleManageTimeoutMs = 67;

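// Granularity at which the unmanaged-memory watermarks computed in
// UpdateUnmanagedMemoryLimits() advance.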
const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024;

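// Adjust |*total_size| by the difference between |new_size| and |old_size|.
// Because the values are unsigned, a decrease wraps |new_size - old_size|
// modulo 2^64 and the subsequent addition wraps back, so the net effect is a
// correct subtraction; the DCHECK guards against underflowing the total.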
void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
  DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
  *total_size += (new_size - old_size);
}

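// Round n up or down to the nearest multiple of mul (mul must be nonzero).
// For example, RoundUp<uint64>(40, 16) == 48 and RoundDown<uint64>(40, 16)
// == 32.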
template<typename T>
T RoundUp(T n, T mul) {
  return ((n + mul - 1) / mul) * mul;
}

template<typename T>
T RoundDown(T n, T mul) {
  return (n / mul) * mul;
}

}  // namespace

GpuMemoryManager::GpuMemoryManager(
    GpuChannelManager* channel_manager,
    uint64 max_surfaces_with_frontbuffer_soft_limit)
    : channel_manager_(channel_manager),
      manage_immediate_scheduled_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          max_surfaces_with_frontbuffer_soft_limit),
      priority_cutoff_(MemoryAllocation::CUTOFF_ALLOW_EVERYTHING),
      bytes_available_gpu_memory_(0),
      bytes_available_gpu_memory_overridden_(false),
      bytes_minimum_per_client_(0),
      bytes_default_per_client_(0),
      bytes_allocated_managed_current_(0),
      bytes_allocated_unmanaged_current_(0),
      bytes_allocated_historical_max_(0),
      bytes_allocated_unmanaged_high_(0),
      bytes_allocated_unmanaged_low_(0),
      bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep),
      disable_schedule_manage_(false) {
  CommandLine* command_line = CommandLine::ForCurrentProcess();

  // Use a more conservative memory allocation policy on Linux and Mac because
  // the platform is unstable when under memory pressure.
  // http://crbug.com/145600 (Linux)
  // http://crbug.com/141377 (Mac)
#if defined(OS_MACOSX) || (defined(OS_LINUX) && !defined(OS_CHROMEOS))
  priority_cutoff_ = MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
#endif

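  // Establish platform-specific defaults for the per-client allocation
  // limits.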
#if defined(OS_ANDROID)
  bytes_default_per_client_ = 8 * 1024 * 1024;
  bytes_minimum_per_client_ = 8 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
  bytes_default_per_client_ = 64 * 1024 * 1024;
  bytes_minimum_per_client_ = 4 * 1024 * 1024;
#elif defined(OS_MACOSX)
  bytes_default_per_client_ = 128 * 1024 * 1024;
  bytes_minimum_per_client_ = 128 * 1024 * 1024;
#else
  bytes_default_per_client_ = 64 * 1024 * 1024;
  bytes_minimum_per_client_ = 64 * 1024 * 1024;
#endif

  if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) {
    base::StringToUint64(
        command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb),
        &bytes_available_gpu_memory_);
    bytes_available_gpu_memory_ *= 1024 * 1024;
    bytes_available_gpu_memory_overridden_ = true;
  } else {
    bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory();
  }
}

GpuMemoryManager::~GpuMemoryManager() {
  DCHECK(tracking_groups_.empty());
  DCHECK(clients_visible_mru_.empty());
  DCHECK(clients_nonvisible_mru_.empty());
  DCHECK(clients_nonsurface_.empty());
  DCHECK(!bytes_allocated_managed_current_);
  DCHECK(!bytes_allocated_unmanaged_current_);
}

uint64 GpuMemoryManager::GetAvailableGpuMemory() const {
  // Allow unmanaged allocations to over-subscribe by at most (high_ - low_)
  // before restricting managed (compositor) memory based on unmanaged usage.
  if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_)
    return 0;
  return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_;
}

uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const {
#if defined(OS_ANDROID)
  return 16 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
  return 1024 * 1024 * 1024;
#else
  return 256 * 1024 * 1024;
#endif
}

uint64 GpuMemoryManager::GetMaximumTotalGpuMemory() const {
#if defined(OS_ANDROID)
  return 256 * 1024 * 1024;
#else
  return 1024 * 1024 * 1024;
#endif
}

uint64 GpuMemoryManager::GetMaximumClientAllocation() const {
#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
  return bytes_available_gpu_memory_;
#else
  // This is to avoid allowing a single page to use the full 256MB of memory
  // (the current total limit). Long-scroll pages will hit this limit,
  // resulting in instability on some platforms (e.g., issue 141377).
  return bytes_available_gpu_memory_ / 2;
#endif
}

uint64 GpuMemoryManager::CalcAvailableFromGpuTotal(uint64 total_gpu_memory) {
#if defined(OS_ANDROID)
  // We don't need to reduce the total on Android, since
  // the total is an estimate to begin with.
  return total_gpu_memory;
#else
  // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU
  // memory, whichever is less.
  return std::min(3 * total_gpu_memory / 4,
                  total_gpu_memory - 64 * 1024 * 1024);
#endif
}

void GpuMemoryManager::UpdateAvailableGpuMemory() {
  // If the amount of video memory to use was specified at the command
  // line, never change it.
  if (bytes_available_gpu_memory_overridden_)
    return;

  // On non-Android, we use an operating system query when possible.
  // We do not have a reliable concept of multiple GPUs existing in
  // a system, so just be safe and go with the minimum encountered.
  uint64 bytes_min = 0;

  // Only use the clients that are visible, because otherwise the set of
  // clients we are querying could become extremely large.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    const GpuMemoryManagerClientState* client_state = *it;
    if (!client_state->has_surface_)
      continue;
    if (!client_state->visible_)
      continue;

    uint64 bytes = 0;
    if (client_state->client_->GetTotalGpuMemory(&bytes)) {
      if (!bytes_min || bytes < bytes_min)
        bytes_min = bytes;
    }
  }

  if (!bytes_min)
    return;

  bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min);

  // Never go below the default allocation.
  bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_,
                                         GetDefaultAvailableGpuMemory());

  // Never go above the maximum.
  bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_,
                                         GetMaximumTotalGpuMemory());
}

void GpuMemoryManager::UpdateUnmanagedMemoryLimits() {
  // Set the limit to be [current_, current_ + step_ / 4), with the endpoints
  // of the intervals rounded down and up to the nearest step_, to avoid
  // thrashing the interval.
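  // For example, with the default 16 MiB step and 40 MiB currently allocated,
  // the window becomes [32 MiB, 48 MiB): RoundDown(40, 16) == 32 and
  // RoundUp(40 + 16 / 4, 16) == RoundUp(44, 16) == 48.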
  bytes_allocated_unmanaged_high_ = RoundUp(
      bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4,
      bytes_unmanaged_limit_step_);
  bytes_allocated_unmanaged_low_ = RoundDown(
      bytes_allocated_unmanaged_current_,
      bytes_unmanaged_limit_step_);
}

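// Schedule a call to Manage(). kScheduleManageNow posts an immediate task
// (and cancels any pending delayed one); kScheduleManageLater coalesces
// requests into a single delayed callback so that bursts of low-priority
// changes trigger only one Manage() pass.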
void GpuMemoryManager::ScheduleManage(
    ScheduleManageTime schedule_manage_time) {
  if (disable_schedule_manage_)
    return;
  if (manage_immediate_scheduled_)
    return;
  if (schedule_manage_time == kScheduleManageNow) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
    manage_immediate_scheduled_ = true;
    if (!delayed_manage_callback_.IsCancelled())
      delayed_manage_callback_.Cancel();
  } else {
    if (!delayed_manage_callback_.IsCancelled())
      return;
    delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage,
                                              AsWeakPtr()));
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        delayed_manage_callback_.callback(),
        base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs));
  }
}

void GpuMemoryManager::TrackMemoryAllocatedChange(
    GpuMemoryTrackingGroup* tracking_group,
    uint64 old_size,
    uint64 new_size,
    gpu::gles2::MemoryTracker::Pool tracking_pool) {
  TrackValueChanged(old_size, new_size, &tracking_group->size_);
  switch (tracking_pool) {
    case gpu::gles2::MemoryTracker::kManaged:
      TrackValueChanged(old_size, new_size, &bytes_allocated_managed_current_);
      break;
    case gpu::gles2::MemoryTracker::kUnmanaged:
      TrackValueChanged(old_size,
                        new_size,
                        &bytes_allocated_unmanaged_current_);
      break;
    default:
      NOTREACHED();
      break;
  }
  if (new_size != old_size) {
    TRACE_COUNTER1("gpu",
                   "GpuMemoryUsage",
                   GetCurrentUsage());
  }

  // If we've gone past our current limit on unmanaged memory, schedule a
  // re-manage to take into account the unmanaged memory.
  if (bytes_allocated_unmanaged_current_ >= bytes_allocated_unmanaged_high_)
    ScheduleManage(kScheduleManageNow);
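  // If usage has fallen below the low watermark, the limits can be lowered,
  // but there is no urgency, so coalesce into a delayed Manage().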
  if (bytes_allocated_unmanaged_current_ < bytes_allocated_unmanaged_low_)
    ScheduleManage(kScheduleManageLater);

  if (GetCurrentUsage() > bytes_allocated_historical_max_) {
    bytes_allocated_historical_max_ = GetCurrentUsage();
    // If we're blowing into new memory usage territory, spam the browser
    // process with the most up-to-date information about our memory usage.
    SendUmaStatsToBrowser();
  }
}

bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) {
  // TODO: Check if there is enough space. Lose contexts until there is.
  return true;
}

GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState(
    GpuMemoryManagerClient* client,
    bool has_surface,
    bool visible) {
  TrackingGroupMap::iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second;

  GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState(
      this, client, tracking_group, has_surface, visible);
  AddClientToList(client_state);
  ScheduleManage(kScheduleManageNow);
  return client_state;
}

void GpuMemoryManager::OnDestroyClientState(
    GpuMemoryManagerClientState* client_state) {
  RemoveClientFromList(client_state);
  ScheduleManage(kScheduleManageLater);
}

void GpuMemoryManager::SetClientStateVisible(
    GpuMemoryManagerClientState* client_state, bool visible) {
  DCHECK(client_state->has_surface_);
  if (client_state->visible_ == visible)
    return;

  RemoveClientFromList(client_state);
  client_state->visible_ = visible;
  AddClientToList(client_state);
  ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
}

void GpuMemoryManager::SetClientStateManagedMemoryStats(
    GpuMemoryManagerClientState* client_state,
    const ManagedMemoryStats& stats) {
  client_state->managed_memory_stats_ = stats;

  // If this is the first time that stats have been received for this
  // client, use them immediately.
  if (!client_state->managed_memory_stats_received_) {
    client_state->managed_memory_stats_received_ = true;
    ScheduleManage(kScheduleManageNow);
    return;
  }

  // If these statistics sit outside of the range that we used in our
  // computation of memory allocations then recompute the allocations.
  if (client_state->managed_memory_stats_.bytes_nice_to_have >
      client_state->bytes_nicetohave_limit_high_) {
    ScheduleManage(kScheduleManageNow);
  } else if (client_state->managed_memory_stats_.bytes_nice_to_have <
             client_state->bytes_nicetohave_limit_low_) {
    ScheduleManage(kScheduleManageLater);
  }
}

uint64 GpuMemoryManager::GetClientMemoryUsage(
    const GpuMemoryManagerClient* client) const {
  TrackingGroupMap::const_iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  return tracking_group_it->second->GetSize();
}

GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup(
    base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) {
  GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup(
      pid, memory_tracker, this);
  DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(),
                                         tracking_group));
  return tracking_group;
}

void GpuMemoryManager::OnDestroyTrackingGroup(
    GpuMemoryTrackingGroup* tracking_group) {
  DCHECK(tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.erase(tracking_group->GetMemoryTracker());
}

void GpuMemoryManager::GetVideoMemoryUsageStats(
    GPUVideoMemoryUsageStats* video_memory_usage_stats) const {
  // For each context group, assign its memory usage to its PID.
  video_memory_usage_stats->process_map.clear();
  for (TrackingGroupMap::const_iterator i = tracking_groups_.begin();
       i != tracking_groups_.end(); ++i) {
    const GpuMemoryTrackingGroup* tracking_group = i->second;
    video_memory_usage_stats->process_map[
        tracking_group->GetPid()].video_memory += tracking_group->GetSize();
  }

  // Assign the total across all processes in the GPU process.
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].video_memory = GetCurrentUsage();
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].has_duplicates = true;

  video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
  video_memory_usage_stats->bytes_allocated_historical_max =
      bytes_allocated_historical_max_;
}

void GpuMemoryManager::Manage() {
  manage_immediate_scheduled_ = false;
  delayed_manage_callback_.Cancel();

  // Update the amount of GPU memory available on the system.
  UpdateAvailableGpuMemory();

  // Update the limit on unmanaged memory.
  UpdateUnmanagedMemoryLimits();

  // Determine which clients are "hibernated" (which determines the
  // distribution of frontbuffers and memory among clients that don't have
  // surfaces).
  SetClientsHibernatedState();

  // Assign memory allocations to clients that have surfaces.
  AssignSurfacesAllocations();

  // Assign memory allocations to clients that don't have surfaces.
  AssignNonSurfacesAllocations();

  SendUmaStatsToBrowser();
}

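// Compute the largest uniform per-entry cap such that, after capping every
// entry of |bytes|, the sum does not exceed |bytes_sum_limit|. For example,
// for bytes = {10, 20, 30} and bytes_sum_limit = 45, the result is 17:
// 10 + min(20, 17) + min(30, 17) == 44 <= 45, and no larger cap fits.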
// static
uint64 GpuMemoryManager::ComputeCap(
    std::vector<uint64> bytes, uint64 bytes_sum_limit) {
  size_t bytes_size = bytes.size();
  uint64 bytes_sum = 0;

  if (bytes_size == 0)
    return std::numeric_limits<uint64>::max();

  // Sort and add up all entries.
  std::sort(bytes.begin(), bytes.end());
  for (size_t i = 0; i < bytes_size; ++i)
    bytes_sum += bytes[i];

  // As we go through the below loop, let bytes_partial_sum be the
  // sum of bytes[0] + ... + bytes[bytes_size - i - 1].
  uint64 bytes_partial_sum = bytes_sum;

  // Try using each entry as a cap, and see where we get cut off.
  for (size_t i = 0; i < bytes_size; ++i) {
    // Try limiting the cap to bytes[bytes_size - i - 1].
    uint64 test_cap = bytes[bytes_size - i - 1];
    uint64 bytes_sum_with_test_cap = i * test_cap + bytes_partial_sum;

    // If that fits, raise test_cap to give an even distribution to the
    // last i entries.
    if (bytes_sum_with_test_cap <= bytes_sum_limit) {
      if (i == 0)
        return std::numeric_limits<uint64>::max();
      else
        return test_cap + (bytes_sum_limit - bytes_sum_with_test_cap) / i;
    } else {
      bytes_partial_sum -= test_cap;
    }
  }

  // If we got here, then we can't fully accommodate any of the clients,
  // so distribute bytes_sum_limit evenly.
  return bytes_sum_limit / bytes_size;
}

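// Compute a visible client's allocation in three tiers: every client gets
// GetMinimumClientAllocation(); growth from minimum to its (padded) required
// bytes is limited by |bytes_above_minimum_cap|; growth from required to its
// (padded) nice-to-have bytes is limited by |bytes_above_required_cap|; and
// the total is clamped to |bytes_overall_cap|. The required and nice-to-have
// figures reported by the client are padded by 9/8 and 4/3 respectively to
// give them headroom before being clamped to the per-client bounds.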
uint64 GpuMemoryManager::ComputeClientAllocationWhenVisible(
    GpuMemoryManagerClientState* client_state,
    uint64 bytes_above_required_cap,
    uint64 bytes_above_minimum_cap,
    uint64 bytes_overall_cap) {
  ManagedMemoryStats* stats = &client_state->managed_memory_stats_;

  if (!client_state->managed_memory_stats_received_)
    return GetDefaultClientAllocation();

  uint64 bytes_required = 9 * stats->bytes_required / 8;
  bytes_required = std::min(bytes_required, GetMaximumClientAllocation());
  bytes_required = std::max(bytes_required, GetMinimumClientAllocation());

  uint64 bytes_nicetohave = 4 * stats->bytes_nice_to_have / 3;
  bytes_nicetohave = std::min(bytes_nicetohave, GetMaximumClientAllocation());
  bytes_nicetohave = std::max(bytes_nicetohave, GetMinimumClientAllocation());
  bytes_nicetohave = std::max(bytes_nicetohave, bytes_required);

  uint64 allocation = GetMinimumClientAllocation();
  allocation += std::min(bytes_required - GetMinimumClientAllocation(),
                         bytes_above_minimum_cap);
  allocation += std::min(bytes_nicetohave - bytes_required,
                         bytes_above_required_cap);
  allocation = std::min(allocation,
                        bytes_overall_cap);
  return allocation;
}

void GpuMemoryManager::ComputeVisibleSurfacesAllocations() {
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max();
  uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
  uint64 bytes_overall_cap_visible = GetMaximumClientAllocation();

  // Compute memory usage at three levels:
  // - painting everything that is nicetohave for visible clients
  // - painting only what is visible
  // - giving every client the minimum allocation
  uint64 bytes_nicetohave_visible = 0;
  uint64 bytes_required_visible = 0;
  uint64 bytes_minimum_visible = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_ideal_nicetohave_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    client_state->bytes_allocation_ideal_required_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            0,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    client_state->bytes_allocation_ideal_minimum_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            0,
            0,
            bytes_overall_cap_visible);

    bytes_nicetohave_visible +=
        client_state->bytes_allocation_ideal_nicetohave_;
    bytes_required_visible +=
        client_state->bytes_allocation_ideal_required_;
    bytes_minimum_visible +=
        client_state->bytes_allocation_ideal_minimum_;
  }

  // Determine which of those three points we can satisfy, and limit
  // bytes_above_required_cap and bytes_above_minimum_cap to not go
  // over the limit.
  if (bytes_minimum_visible > bytes_available_total) {
    bytes_above_required_cap = 0;
    bytes_above_minimum_cap = 0;
  } else if (bytes_required_visible > bytes_available_total) {
    std::vector<uint64> bytes_to_fit;
    for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
         it != clients_visible_mru_.end();
         ++it) {
      GpuMemoryManagerClientState* client_state = *it;
      bytes_to_fit.push_back(client_state->bytes_allocation_ideal_required_ -
                             client_state->bytes_allocation_ideal_minimum_);
    }
    bytes_above_required_cap = 0;
    bytes_above_minimum_cap = ComputeCap(
        bytes_to_fit, bytes_available_total - bytes_minimum_visible);
  } else if (bytes_nicetohave_visible > bytes_available_total) {
    std::vector<uint64> bytes_to_fit;
    for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
         it != clients_visible_mru_.end();
         ++it) {
      GpuMemoryManagerClientState* client_state = *it;
      bytes_to_fit.push_back(client_state->bytes_allocation_ideal_nicetohave_ -
                             client_state->bytes_allocation_ideal_required_);
    }
    bytes_above_required_cap = ComputeCap(
        bytes_to_fit, bytes_available_total - bytes_required_visible);
    bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
  }

  // Given those computed limits, set the actual memory allocations for the
  // visible clients, tracking the largest allocation and the total allocation
  // for future use.
  uint64 bytes_allocated_visible = 0;
  uint64 bytes_allocated_max_client_allocation = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_when_visible_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
    bytes_allocated_max_client_allocation = std::max(
        bytes_allocated_max_client_allocation,
        client_state->bytes_allocation_when_visible_);
  }

  // Set the limit for nonvisible clients for when they become visible.
  // Use the same formula, with a lowered overall cap in case any of the
  // currently-nonvisible clients are much more resource-intensive than any
  // of the existing clients.
  uint64 bytes_overall_cap_nonvisible = bytes_allocated_max_client_allocation;
  if (bytes_available_total > bytes_allocated_visible) {
    bytes_overall_cap_nonvisible +=
        bytes_available_total - bytes_allocated_visible;
  }
  bytes_overall_cap_nonvisible = std::min(bytes_overall_cap_nonvisible,
                                          GetMaximumClientAllocation());
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_when_visible_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_nonvisible);
  }
}

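// If the visible clients' allocations do not exhaust the available budget,
// spread the remainder across them, using ComputeCap() so that no client's
// allocation exceeds GetMaximumClientAllocation().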
void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() {
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_allocated_total = 0;

  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    bytes_allocated_total += client_state->bytes_allocation_when_visible_;
  }

  if (bytes_allocated_total >= bytes_available_total)
    return;

  std::vector<uint64> bytes_extra_requests;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    CHECK(GetMaximumClientAllocation() >=
          client_state->bytes_allocation_when_visible_);
    uint64 bytes_extra = GetMaximumClientAllocation() -
                         client_state->bytes_allocation_when_visible_;
    bytes_extra_requests.push_back(bytes_extra);
  }
  uint64 bytes_extra_cap = ComputeCap(
      bytes_extra_requests, bytes_available_total - bytes_allocated_total);
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    uint64 bytes_extra = GetMaximumClientAllocation() -
                         client_state->bytes_allocation_when_visible_;
    client_state->bytes_allocation_when_visible_ += std::min(
        bytes_extra, bytes_extra_cap);
  }
}

void GpuMemoryManager::AssignSurfacesAllocations() {
  // Compute the allocation-when-visible for all clients.
  ComputeVisibleSurfacesAllocations();

  // Distribute the remaining memory to visible clients.
  DistributeRemainingMemoryToVisibleSurfaces();

  // Send that allocation to the clients.
  ClientStateList clients = clients_visible_mru_;
  clients.insert(clients.end(),
                 clients_nonvisible_mru_.begin(),
                 clients_nonvisible_mru_.end());
  for (ClientStateList::const_iterator it = clients.begin();
       it != clients.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Re-assign memory limits to this client when its "nice to have" bucket
    // grows or shrinks by 1/4.
    client_state->bytes_nicetohave_limit_high_ =
        5 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;
    client_state->bytes_nicetohave_limit_low_ =
        3 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;

    // Populate and send the allocation to the client.
    MemoryAllocation allocation;

    allocation.bytes_limit_when_visible =
        client_state->bytes_allocation_when_visible_;
    allocation.priority_cutoff_when_visible = priority_cutoff_;

    client_state->client_->SetMemoryAllocation(allocation);
    client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_);
  }
}

void GpuMemoryManager::AssignNonSurfacesAllocations() {
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    MemoryAllocation allocation;

    if (!client_state->hibernated_) {
      allocation.bytes_limit_when_visible =
          GetMinimumClientAllocation();
      allocation.priority_cutoff_when_visible =
          MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
    }

    client_state->client_->SetMemoryAllocation(allocation);
  }
}

void GpuMemoryManager::SetClientsHibernatedState() const {
  // Re-set all tracking groups as being hibernated.
  for (TrackingGroupMap::const_iterator it = tracking_groups_.begin();
       it != tracking_groups_.end();
       ++it) {
    GpuMemoryTrackingGroup* tracking_group = it->second;
    tracking_group->hibernated_ = true;
  }
  // All clients with surfaces that are visible are non-hibernated.
  uint64 non_hibernated_clients = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = false;
    client_state->tracking_group_->hibernated_ = false;
    non_hibernated_clients++;
  }
  // Then an additional few clients with surfaces are non-hibernated too, up to
  // a fixed limit.
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    if (non_hibernated_clients < max_surfaces_with_frontbuffer_soft_limit_) {
      client_state->hibernated_ = false;
      client_state->tracking_group_->hibernated_ = false;
      non_hibernated_clients++;
    } else {
      client_state->hibernated_ = true;
    }
  }
  // Clients that don't have surfaces are non-hibernated if they are
  // in a GL share group with a non-hibernated surface.
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = client_state->tracking_group_->hibernated_;
  }
}

void GpuMemoryManager::SendUmaStatsToBrowser() {
  if (!channel_manager_)
    return;
  GPUMemoryUmaStats params;
  params.bytes_allocated_current = GetCurrentUsage();
  params.bytes_allocated_max = bytes_allocated_historical_max_;
  params.bytes_limit = bytes_available_gpu_memory_;
  params.client_count = clients_visible_mru_.size() +
                        clients_nonvisible_mru_.size() +
                        clients_nonsurface_.size();
  params.context_group_count = tracking_groups_.size();
  channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
}

GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
    GpuMemoryManagerClientState* client_state) {
  if (client_state->has_surface_) {
    if (client_state->visible_)
      return &clients_visible_mru_;
    else
      return &clients_nonvisible_mru_;
  }
  return &clients_nonsurface_;
}

void GpuMemoryManager::AddClientToList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(!client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_state->list_iterator_ = client_list->insert(
      client_list->begin(), client_state);
  client_state->list_iterator_valid_ = true;
}

void GpuMemoryManager::RemoveClientFromList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_list->erase(client_state->list_iterator_);
  client_state->list_iterator_valid_ = false;
}

}  // namespace content