// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/client/mapped_memory.h"

#include <algorithm>
#include <functional>

#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

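// A MemoryChunk tracks one shared-memory transfer buffer. Sub-allocations
// are handed out by allocator_, which keeps freed blocks unavailable until
// their associated tokens have passed through the command stream.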
MemoryChunk::MemoryChunk(int32 shm_id,
                         scoped_refptr<gpu::Buffer> shm,
                         CommandBufferHelper* helper,
                         const base::Closure& poll_callback)
    : shm_id_(shm_id),
      shm_(shm),
      allocator_(shm->size(), helper, poll_callback, shm->memory()) {}

MemoryChunk::~MemoryChunk() {}

MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
                                         const base::Closure& poll_callback,
                                         size_t unused_memory_reclaim_limit)
    : chunk_size_multiple_(1),
      helper_(helper),
      poll_callback_(poll_callback),
      allocated_memory_(0),
      max_free_bytes_(unused_memory_reclaim_limit) {
}

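// Destroys the transfer buffer backing each remaining chunk.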
MappedMemoryManager::~MappedMemoryManager() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  for (MemoryChunkVector::iterator iter = chunks_.begin();
       iter != chunks_.end(); ++iter) {
    MemoryChunk* chunk = *iter;
    cmd_buf->DestroyTransferBuffer(chunk->shm_id());
  }
}

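// Tries to satisfy the request from an existing chunk; if that fails and the
// total free memory exceeds max_free_bytes_, waits on pending tokens to
// reclaim space; as a last resort allocates a new chunk.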
void* MappedMemoryManager::Alloc(
    unsigned int size, int32* shm_id, unsigned int* shm_offset) {
  DCHECK(shm_id);
  DCHECK(shm_offset);
  if (size <= allocated_memory_) {
    size_t total_bytes_in_use = 0;
    // See if any of the chunks can satisfy this request.
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      chunk->FreeUnused();
      total_bytes_in_use += chunk->bytes_in_use();
      if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
        void* mem = chunk->Alloc(size);
        DCHECK(mem);
        *shm_id = chunk->shm_id();
        *shm_offset = chunk->GetOffset(mem);
        return mem;
      }
    }

    // If a memory limit is being enforced and the total free memory
    // (allocated_memory_ - total_bytes_in_use) is larger than the limit,
    // try waiting.
    if (max_free_bytes_ != kNoLimit &&
        (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
      TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
      for (size_t ii = 0; ii < chunks_.size(); ++ii) {
        MemoryChunk* chunk = chunks_[ii];
        if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
          void* mem = chunk->Alloc(size);
          DCHECK(mem);
          *shm_id = chunk->shm_id();
          *shm_offset = chunk->GetOffset(mem);
          return mem;
        }
      }
    }
  }

  // Make a new chunk to satisfy the request.
  CommandBuffer* cmd_buf = helper_->command_buffer();
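  // Round the chunk size up to the next multiple of chunk_size_multiple_.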
  unsigned int chunk_size =
      ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
      chunk_size_multiple_;
  int32 id = -1;
  scoped_refptr<gpu::Buffer> shm =
      cmd_buf->CreateTransferBuffer(chunk_size, &id);
  if (id < 0)
    return NULL;
  DCHECK(shm);
  MemoryChunk* mc = new MemoryChunk(id, shm, helper_, poll_callback_);
  allocated_memory_ += mc->GetSize();
  chunks_.push_back(mc);
  void* mem = mc->Alloc(size);
  DCHECK(mem);
  *shm_id = mc->shm_id();
  *shm_offset = mc->GetOffset(mem);
  return mem;
}

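// Frees a block immediately by locating the chunk that contains it.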
void MappedMemoryManager::Free(void* pointer) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->Free(pointer);
      return;
    }
  }
  NOTREACHED();
}

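// Frees a block once the given token has passed; the block is not reused
// before then.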
void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->FreePendingToken(pointer, token);
      return;
    }
  }
  NOTREACHED();
}

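// Reclaims blocks whose tokens have passed and destroys any chunk that no
// longer has allocations, releasing its transfer buffer.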
void MappedMemoryManager::FreeUnused() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  MemoryChunkVector::iterator iter = chunks_.begin();
  while (iter != chunks_.end()) {
    MemoryChunk* chunk = *iter;
    chunk->FreeUnused();
    if (!chunk->InUse()) {
      cmd_buf->DestroyTransferBuffer(chunk->shm_id());
      allocated_memory_ -= chunk->GetSize();
      iter = chunks_.erase(iter);
    } else {
      ++iter;
    }
  }
}

}  // namespace gpu