/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "swap_space.h"

#include <sys/mman.h>

#include <algorithm>
#include <numeric>

#include "base/bit_utils.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "thread-current-inl.h"

namespace art {

// The chunk size by which the swap file is increased and mapped.
static constexpr size_t kMinimumMapSize = 16 * MB;

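// When true, Free() verifies the consistency of the free maps (and their total
// free size) before and after every deallocation. Debugging aid only.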
static constexpr bool kCheckFreeMaps = false;

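// Log the free list for debugging: a header line per chunk size, followed by
// the start address and size of each free chunk of that size.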
template <typename FreeBySizeSet>
static void DumpFreeMap(const FreeBySizeSet& free_by_size) {
  size_t last_size = static_cast<size_t>(-1);
  for (const auto& entry : free_by_size) {
    if (last_size != entry.size) {
      last_size = entry.size;
      LOG(INFO) << "Size " << last_size;
    }
    LOG(INFO) << "  0x" << std::hex << entry.free_by_start_entry->Start()
        << " size=" << std::dec << entry.free_by_start_entry->size;
  }
}

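// Erase a free chunk from both maps, using the by-size iterator to locate the
// corresponding by-start entry.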
void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) {
  auto free_by_start_pos = free_by_size_pos->free_by_start_entry;
  free_by_size_.erase(free_by_size_pos);
  free_by_start_.erase(free_by_start_pos);
}

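// Record a free chunk in both maps. The chunk must be non-empty and its start
// address must not already be present in free_by_start_.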
inline void SwapSpace::InsertChunk(const SpaceChunk& chunk) {
  DCHECK_NE(chunk.size, 0u);
  auto insert_result = free_by_start_.insert(chunk);
  DCHECK(insert_result.second);
  free_by_size_.emplace(chunk.size, insert_result.first);
}

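// Create a swap space backed by the given file descriptor. The file is grown
// to provide an initial free chunk of at least `initial_size` bytes.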
SwapSpace::SwapSpace(int fd, size_t initial_size)
    : fd_(fd),
      size_(0),
      lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) {
  // Assume that the file is unlinked.

  InsertChunk(NewFileChunk(initial_size));
}

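// A minimal usage sketch (hypothetical, for illustration only; the real setup
// lives in the compiler driver): open a scratch file, unlink it so that only
// the descriptor keeps it alive, then hand the descriptor to the SwapSpace,
// which closes it on destruction.
//
//   int fd = open("/data/local/tmp/swap.tmp", O_RDWR | O_CREAT, 0600);
//   unlink("/data/local/tmp/swap.tmp");  // Path and flags are illustrative.
//   SwapSpace swap(fd, 10 * MB);
//   void* p = swap.Alloc(100);  // Rounded up to 8-byte granularity internally.
//   swap.Free(p, 100);          // The caller must pass the original size.
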
SwapSpace::~SwapSpace() {
  // Unmap all mmapped chunks. Nothing should be allocated anymore at
  // this point, so there should be only full-size chunks in free_by_start_.
  for (const SpaceChunk& chunk : free_by_start_) {
    if (munmap(chunk.ptr, chunk.size) != 0) {
      PLOG(ERROR) << "Failed to unmap swap space chunk at "
          << static_cast<const void*>(chunk.ptr) << " size=" << chunk.size;
    }
  }
  // All arenas are backed by the same file. Just close the descriptor.
  close(fd_);
}

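// Debug helper: check that the two free maps agree on the number of chunks and
// on the total number of free bytes, and return that total. Aborts on any
// mismatch.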
template <typename FreeByStartSet, typename FreeBySizeSet>
static size_t CollectFree(const FreeByStartSet& free_by_start, const FreeBySizeSet& free_by_size) {
  if (free_by_start.size() != free_by_size.size()) {
    LOG(FATAL) << "Size: " << free_by_start.size() << " vs " << free_by_size.size();
  }

  // Calculate over free_by_size.
  size_t sum1 = 0;
  for (const auto& entry : free_by_size) {
    sum1 += entry.free_by_start_entry->size;
  }

  // Calculate over free_by_start.
  size_t sum2 = 0;
  for (const auto& entry : free_by_start) {
    sum2 += entry.size;
  }

  if (sum1 != sum2) {
    LOG(FATAL) << "Sum: " << sum1 << " vs " << sum2;
  }
  return sum1;
}

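// Allocate `size` bytes (rounded up to 8-byte granularity) from the free list,
// growing the backing file if no free chunk is large enough.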
void* SwapSpace::Alloc(size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  // Check the free list for something that fits.
  // TODO: Smarter implementation. Global biggest chunk, ...
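  // Best-fit lookup: free_by_size_ is ordered by (size, start address), so a
  // lower_bound probe keyed on the smallest possible start iterator yields the
  // smallest free chunk that can satisfy the request.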
  auto it = free_by_start_.empty()
      ? free_by_size_.end()
      : free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
  if (it != free_by_size_.end()) {
    auto entry = it->free_by_start_entry;
    SpaceChunk old_chunk = *entry;
    if (old_chunk.size == size) {
      RemoveChunk(it);
    } else {
      // Try to avoid deallocating and allocating the std::set<> nodes.
      // This would be much simpler if we could use replace() from Boost.Bimap.

      // The free_by_start_ map contains disjoint intervals ordered by `ptr`.
      // Shrinking the interval does not affect the ordering.
      it->free_by_start_entry->ptr += size;
      it->free_by_start_entry->size -= size;

      // The free_by_size_ map is ordered by `size` and then by `free_by_start_entry->ptr`.
      // Adjusting the `ptr` above does not change that ordering, but decreasing `size` can
      // push the node before the previous node(s).
      if (it == free_by_size_.begin()) {
        it->size -= size;
      } else {
        auto prev = it;
        --prev;
        FreeBySizeEntry new_value(old_chunk.size - size, entry);
        if (free_by_size_.key_comp()(*prev, new_value)) {
          it->size -= size;
        } else {
          // Changing in place would break the std::set<> ordering; we need to remove and insert.
          free_by_size_.erase(it);
          free_by_size_.insert(new_value);
        }
      }
    }
    return old_chunk.ptr;
  } else {
    // No free chunk is big enough; we need to increase the file size.
    SpaceChunk new_chunk = NewFileChunk(size);
    if (new_chunk.size != size) {
      // Insert the remainder.
      SpaceChunk remainder = { new_chunk.ptr + size, new_chunk.size - size };
      InsertChunk(remainder);
    }
    return new_chunk.ptr;
  }
}

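// Grow the backing file by at least `min_size` bytes (and by at least
// kMinimumMapSize), then map the new region and return it as a single chunk.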
SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
  size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMinimumMapSize, kPageSize));
  int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
  if (result != 0) {
    PLOG(FATAL) << "Unable to increase swap file.";
  }
  uint8_t* ptr = reinterpret_cast<uint8_t*>(
      mmap(nullptr, next_part, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, size_));
  if (ptr == MAP_FAILED) {
    LOG(ERROR) << "Unable to mmap new swap file chunk.";
    LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size;
    LOG(ERROR) << "Free list:";
    DumpFreeMap(free_by_size_);
    LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_);
    LOG(FATAL) << "Aborting...";
  }
  size_ += next_part;
  SpaceChunk new_chunk = {ptr, next_part};
  return new_chunk;
#else
  UNUSED(min_size, kMinimumMapSize);
  LOG(FATAL) << "No swap file support on the Mac.";
  UNREACHABLE();
#endif
}

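// Return a block to the free list, coalescing it with the adjacent free chunk
// on either side when they are contiguous.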
// TODO: Full coalescing.
void SwapSpace::Free(void* ptr, size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  size_t free_before = 0;
  if (kCheckFreeMaps) {
    free_before = CollectFree(free_by_start_, free_by_size_);
  }

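  // Find the first free chunk that starts at or after the freed block; its
  // predecessor (if any) is the only chunk that can end exactly where the
  // freed block starts.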
  SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptr), size };
  auto it = free_by_start_.lower_bound(chunk);
  if (it != free_by_start_.begin()) {
    auto prev = it;
    --prev;
    CHECK_LE(prev->End(), chunk.Start());
    if (prev->End() == chunk.Start()) {
      // Merge *prev with this chunk.
      chunk.size += prev->size;
      chunk.ptr -= prev->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(erase_pos);
      // "prev" is invalidated but "it" remains valid.
    }
  }
  if (it != free_by_start_.end()) {
    CHECK_LE(chunk.End(), it->Start());
    if (chunk.End() == it->Start()) {
      // Merge *it with this chunk.
      chunk.size += it->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(erase_pos);
      // "it" is invalidated but we don't need it anymore.
    }
  }
  InsertChunk(chunk);

  if (kCheckFreeMaps) {
    size_t free_after = CollectFree(free_by_start_, free_by_size_);

    if (free_after != free_before + size) {
      DumpFreeMap(free_by_size_);
      CHECK_EQ(free_after, free_before + size)
          << "Should be " << size << " difference from " << free_before;
    }
  }
}

}  // namespace art