// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_manager_client.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_memory_uma_stats.h"
#include "content/common/gpu/gpu_messages.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/gpu_switches.h"

using gpu::MemoryAllocation;

namespace content {
namespace {

const int kDelayedScheduleManageTimeoutMs = 67;

const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024;

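// Applies the delta between |new_size| and |old_size| to |*total_size|.
// Because the values are unsigned, a shrink is handled by the same addition:
// (new_size - old_size) wraps around, and adding the wrapped value brings
// *total_size down by the difference. The DCHECK rejects the one case this
// cannot represent: shrinking by more than *total_size currently holds.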
void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
  DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
  *total_size += (new_size - old_size);
}

}  // namespace

GpuMemoryManager::GpuMemoryManager(
    GpuChannelManager* channel_manager,
    uint64 max_surfaces_with_frontbuffer_soft_limit)
    : channel_manager_(channel_manager),
      manage_immediate_scheduled_(false),
      disable_schedule_manage_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          max_surfaces_with_frontbuffer_soft_limit),
      client_hard_limit_bytes_(0),
      bytes_allocated_managed_current_(0),
      bytes_allocated_unmanaged_current_(0),
      bytes_allocated_historical_max_(0)
{ }

GpuMemoryManager::~GpuMemoryManager() {
  DCHECK(tracking_groups_.empty());
  DCHECK(clients_visible_mru_.empty());
  DCHECK(clients_nonvisible_mru_.empty());
  DCHECK(clients_nonsurface_.empty());
  DCHECK(!bytes_allocated_managed_current_);
  DCHECK(!bytes_allocated_unmanaged_current_);
}

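// Computes |client_hard_limit_bytes_|, the per-client limit handed out when
// allocations are assigned. Precedence: a switches::kForceGpuMemAvailableMb
// command-line override, then (on Android) the minimum reported by the
// visible clients clamped to [16 MB, 256 MB], then a fixed 512 MB desktop
// default. For example, an Android device reporting 8 MB is raised to 16 MB,
// and one reporting 1 GB is capped at 256 MB.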
void GpuMemoryManager::UpdateAvailableGpuMemory() {
  // If the value was overridden on the command line, use the specified value.
  static bool client_hard_limit_bytes_overridden =
      base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kForceGpuMemAvailableMb);
  if (client_hard_limit_bytes_overridden) {
    base::StringToUint64(
        base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
            switches::kForceGpuMemAvailableMb),
        &client_hard_limit_bytes_);
    client_hard_limit_bytes_ *= 1024 * 1024;
    return;
  }

#if defined(OS_ANDROID)
  // On Android, query the clients and take the minimum value encountered.
  // We do not have a reliable concept of multiple GPUs existing in a
  // system, so the minimum is the safe choice.
  uint64 bytes_min = 0;

  // Only use the clients that are visible, because otherwise the set of
  // clients we are querying could become extremely large.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    const GpuMemoryManagerClientState* client_state = *it;
    if (!client_state->has_surface_)
      continue;
    if (!client_state->visible_)
      continue;

    uint64 bytes = 0;
    if (client_state->client_->GetTotalGpuMemory(&bytes)) {
      if (!bytes_min || bytes < bytes_min)
        bytes_min = bytes;
    }
  }

  client_hard_limit_bytes_ = bytes_min;
  // Clamp the observed value to the [16 MB, 256 MB] range used on Android.
  client_hard_limit_bytes_ = std::max(client_hard_limit_bytes_,
                                      static_cast<uint64>(16 * 1024 * 1024));
  client_hard_limit_bytes_ = std::min(client_hard_limit_bytes_,
                                      static_cast<uint64>(256 * 1024 * 1024));
#else
  // Ignore what the system said and give all clients the same maximum
  // allocation on desktop platforms.
  client_hard_limit_bytes_ = 512 * 1024 * 1024;
#endif
}

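// Schedules a call to Manage(). kScheduleManageNow posts an immediate task
// and cancels any pending delayed one; kScheduleManageLater coalesces
// repeated requests into a single cancelable callback that fires after
// kDelayedScheduleManageTimeoutMs. Both kinds of request are no-ops while an
// immediate manage is already scheduled.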
void GpuMemoryManager::ScheduleManage(
    ScheduleManageTime schedule_manage_time) {
  if (disable_schedule_manage_)
    return;
  if (manage_immediate_scheduled_)
    return;
  if (schedule_manage_time == kScheduleManageNow) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
    manage_immediate_scheduled_ = true;
    if (!delayed_manage_callback_.IsCancelled())
      delayed_manage_callback_.Cancel();
  } else {
    if (!delayed_manage_callback_.IsCancelled())
      return;
    delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage,
                                              AsWeakPtr()));
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        delayed_manage_callback_.callback(),
        base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs));
  }
}

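// Applies an allocation-size change to the owning tracking group's total and
// to the process-wide managed or unmanaged total, and keeps the "gpu" trace
// counter current. UMA stats are re-sent to the browser only when the
// historical maximum grows by more than kBytesAllocatedUnmanagedStep, which
// throttles the message traffic.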
void GpuMemoryManager::TrackMemoryAllocatedChange(
    GpuMemoryTrackingGroup* tracking_group,
    uint64 old_size,
    uint64 new_size,
    gpu::gles2::MemoryTracker::Pool tracking_pool) {
  TrackValueChanged(old_size, new_size, &tracking_group->size_);
  switch (tracking_pool) {
    case gpu::gles2::MemoryTracker::kManaged:
      TrackValueChanged(old_size, new_size, &bytes_allocated_managed_current_);
      break;
    case gpu::gles2::MemoryTracker::kUnmanaged:
      TrackValueChanged(old_size,
                        new_size,
                        &bytes_allocated_unmanaged_current_);
      break;
    default:
      NOTREACHED();
      break;
  }
  if (new_size != old_size) {
    TRACE_COUNTER1("gpu",
                   "GpuMemoryUsage",
                   GetCurrentUsage());
  }

  if (GetCurrentUsage() > bytes_allocated_historical_max_ +
                          kBytesAllocatedUnmanagedStep) {
    bytes_allocated_historical_max_ = GetCurrentUsage();
    // If we're blowing into new memory usage territory, spam the browser
    // process with the most up-to-date information about our memory usage.
    SendUmaStatsToBrowser();
  }
}

bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) {
  // TODO: Check if there is enough space. Lose contexts until there is.
  return true;
}

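// Creates the bookkeeping state for a new client. The client's tracking
// group must already exist (see CreateTrackingGroup()); the new client is
// placed at the most-recently-used end of the matching list, and an
// immediate manage pass is scheduled so the client promptly receives an
// allocation.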
GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState(
    GpuMemoryManagerClient* client,
    bool has_surface,
    bool visible) {
  TrackingGroupMap::iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second;

  GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState(
      this, client, tracking_group, has_surface, visible);
  AddClientToList(client_state);
  ScheduleManage(kScheduleManageNow);
  return client_state;
}

void GpuMemoryManager::OnDestroyClientState(
    GpuMemoryManagerClientState* client_state) {
  RemoveClientFromList(client_state);
  ScheduleManage(kScheduleManageLater);
}

void GpuMemoryManager::SetClientStateVisible(
    GpuMemoryManagerClientState* client_state, bool visible) {
  DCHECK(client_state->has_surface_);
  if (client_state->visible_ == visible)
    return;

  RemoveClientFromList(client_state);
  client_state->visible_ = visible;
  AddClientToList(client_state);
  ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
}

uint64 GpuMemoryManager::GetClientMemoryUsage(
    const GpuMemoryManagerClient* client) const {
  TrackingGroupMap::const_iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  return tracking_group_it->second->GetSize();
}

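// Creates a tracking group keyed by the client's memory tracker. The manager
// holds only a raw pointer and never deletes the group, so whoever owns the
// returned object must see that OnDestroyTrackingGroup() runs before this
// manager is destroyed (the destructor DCHECKs that no groups remain).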
GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup(
    base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) {
  GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup(
      pid, memory_tracker, this);
  DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(),
                                         tracking_group));
  return tracking_group;
}

void GpuMemoryManager::OnDestroyTrackingGroup(
    GpuMemoryTrackingGroup* tracking_group) {
  DCHECK(tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.erase(tracking_group->GetMemoryTracker());
}

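// Fills |video_memory_usage_stats| with per-process usage. Note that the GPU
// process entry below is assigned the grand total, which overlaps the bytes
// already attributed to individual renderer PIDs; has_duplicates is
// presumably set so consumers know not to naively sum the entries.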
void GpuMemoryManager::GetVideoMemoryUsageStats(
    GPUVideoMemoryUsageStats* video_memory_usage_stats) const {
  // For each context group, assign its memory usage to its PID.
  video_memory_usage_stats->process_map.clear();
  for (TrackingGroupMap::const_iterator i =
       tracking_groups_.begin(); i != tracking_groups_.end(); ++i) {
    const GpuMemoryTrackingGroup* tracking_group = i->second;
    video_memory_usage_stats->process_map[
        tracking_group->GetPid()].video_memory += tracking_group->GetSize();
  }

  // Assign the total across all processes to the GPU process's entry.
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].video_memory = GetCurrentUsage();
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].has_duplicates = true;

  video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
  video_memory_usage_stats->bytes_allocated_historical_max =
      bytes_allocated_historical_max_;
}

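// The single entry point for recomputing allocations. The scheduling state
// is cleared up front, so a ScheduleManage() call made while this pass runs
// (e.g. by a client reacting to its new allocation) schedules a fresh pass
// instead of being swallowed.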
void GpuMemoryManager::Manage() {
  manage_immediate_scheduled_ = false;
  delayed_manage_callback_.Cancel();

  // Update the amount of GPU memory available on the system.
  UpdateAvailableGpuMemory();

  // Determine which clients are "hibernated" (which determines the
  // distribution of frontbuffers and memory among clients that don't have
  // surfaces).
  SetClientsHibernatedState();

  // Assign memory allocations to clients that have surfaces.
  AssignSurfacesAllocations();

  // Assign memory allocations to clients that don't have surfaces.
  AssignNonSurfacesAllocations();

  SendUmaStatsToBrowser();
}

void GpuMemoryManager::AssignSurfacesAllocations() {
  // Compute and send an allocation to every client that has a surface,
  // visible or not.
  ClientStateList clients = clients_visible_mru_;
  clients.insert(clients.end(),
                 clients_nonvisible_mru_.begin(),
                 clients_nonvisible_mru_.end());
  for (ClientStateList::const_iterator it = clients.begin();
       it != clients.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Populate and send the allocation to the client.
    MemoryAllocation allocation;
    allocation.bytes_limit_when_visible = client_hard_limit_bytes_;
#if defined(OS_ANDROID)
    // On Android, because there is only one visible tab at any time, allow
    // that renderer to cache as much as it can.
    allocation.priority_cutoff_when_visible =
        MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
#else
    // On desktop platforms, instruct the renderers to cache only a smaller
    // set, to play nice with other renderers and other applications. If this
    // is not done, the system can become unstable.
    // http://crbug.com/145600 (Linux)
    // http://crbug.com/141377 (Mac)
    allocation.priority_cutoff_when_visible =
        MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
#endif

    client_state->client_->SetMemoryAllocation(allocation);
    client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_);
  }
}

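// Clients without surfaces either get the full limit, if some surface in
// their share group is non-hibernated, or a default-constructed
// MemoryAllocation, whose zero byte limit effectively asks the client to
// release what it can.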
void GpuMemoryManager::AssignNonSurfacesAllocations() {
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    MemoryAllocation allocation;

    if (!client_state->hibernated_) {
      allocation.bytes_limit_when_visible = client_hard_limit_bytes_;
      allocation.priority_cutoff_when_visible =
          MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
    }

    client_state->client_->SetMemoryAllocation(allocation);
  }
}

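// Hibernation is decided in three tiers: visible clients are always
// non-hibernated, nonvisible clients stay non-hibernated in MRU order until
// max_surfaces_with_frontbuffer_soft_limit_ is reached, and surfaceless
// clients inherit the state of their share group.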
void GpuMemoryManager::SetClientsHibernatedState() const {
  // Re-set all tracking groups as being hibernated.
  for (TrackingGroupMap::const_iterator it = tracking_groups_.begin();
       it != tracking_groups_.end();
       ++it) {
    GpuMemoryTrackingGroup* tracking_group = it->second;
    tracking_group->hibernated_ = true;
  }
  // All clients with surfaces that are visible are non-hibernated.
  uint64 non_hibernated_clients = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = false;
    client_state->tracking_group_->hibernated_ = false;
    non_hibernated_clients++;
  }
  // Then an additional few clients with surfaces are non-hibernated too, up to
  // a fixed limit.
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    if (non_hibernated_clients < max_surfaces_with_frontbuffer_soft_limit_) {
      client_state->hibernated_ = false;
      client_state->tracking_group_->hibernated_ = false;
      non_hibernated_clients++;
    } else {
      client_state->hibernated_ = true;
    }
  }
  // Clients that don't have surfaces are non-hibernated if they are
  // in a GL share group with a non-hibernated surface.
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = client_state->tracking_group_->hibernated_;
  }
}

void GpuMemoryManager::SendUmaStatsToBrowser() {
  if (!channel_manager_)
    return;
  GPUMemoryUmaStats params;
  params.bytes_allocated_current = GetCurrentUsage();
  params.bytes_allocated_max = bytes_allocated_historical_max_;
  params.bytes_limit = client_hard_limit_bytes_;
  params.client_count = clients_visible_mru_.size() +
                        clients_nonvisible_mru_.size() +
                        clients_nonsurface_.size();
  params.context_group_count = tracking_groups_.size();
  channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
}

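// The client lists are kept in MRU order: AddClientToList() inserts at the
// front, so iteration visits the most recently used clients first.
// GetClientList() selects the list matching a client's surface and
// visibility state, which is why SetClientStateVisible() removes and
// re-adds the client when that state changes.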
GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
    GpuMemoryManagerClientState* client_state) {
  if (client_state->has_surface_) {
    if (client_state->visible_)
      return &clients_visible_mru_;
    else
      return &clients_nonvisible_mru_;
  }
  return &clients_nonsurface_;
}

void GpuMemoryManager::AddClientToList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(!client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_state->list_iterator_ = client_list->insert(
      client_list->begin(), client_state);
  client_state->list_iterator_valid_ = true;
}

void GpuMemoryManager::RemoveClientFromList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_list->erase(client_state->list_iterator_);
  client_state->list_iterator_valid_ = false;
}

}  // namespace content