// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 30// --- 31// Author: Sanjay Ghemawat <opensource@google.com> 32 33#include "config.h" 34#include <algorithm> 35#include "central_freelist.h" 36#include "free_list.h" // for FL_Next, FL_Push, etc 37#include "internal_logging.h" // for ASSERT, MESSAGE 38#include "page_heap.h" // for PageHeap 39#include "static_vars.h" // for Static 40 41using std::min; 42using std::max; 43 44namespace tcmalloc { 45 46void CentralFreeList::Init(size_t cl) { 47 size_class_ = cl; 48 tcmalloc::DLL_Init(&empty_); 49 tcmalloc::DLL_Init(&nonempty_); 50 num_spans_ = 0; 51 counter_ = 0; 52 53 max_cache_size_ = kMaxNumTransferEntries; 54#ifdef TCMALLOC_SMALL_BUT_SLOW 55 // Disable the transfer cache for the small footprint case. 56 cache_size_ = 0; 57#else 58 cache_size_ = 16; 59#endif 60 if (cl > 0) { 61 // Limit the maximum size of the cache based on the size class. If this 62 // is not done, large size class objects will consume a lot of memory if 63 // they just sit in the transfer cache. 64 int32_t bytes = Static::sizemap()->ByteSizeForClass(cl); 65 int32_t objs_to_move = Static::sizemap()->num_objects_to_move(cl); 66 67 ASSERT(objs_to_move > 0 && bytes > 0); 68 // Limit each size class cache to at most 1MB of objects or one entry, 69 // whichever is greater. Total transfer cache memory used across all 70 // size classes then can't be greater than approximately 71 // 1MB * kMaxNumTransferEntries. 72 // min and max are in parens to avoid macro-expansion on windows. 73 max_cache_size_ = (min)(max_cache_size_, 74 (max)(1, (1024 * 1024) / (bytes * objs_to_move))); 75 cache_size_ = (min)(cache_size_, max_cache_size_); 76 } 77 used_slots_ = 0; 78 ASSERT(cache_size_ <= max_cache_size_); 79} 80 81void CentralFreeList::ReleaseListToSpans(void* start) { 82 while (start) { 83 void *next = FL_Next(start); 84 ReleaseToSpans(start); 85 start = next; 86 } 87} 88 89// MapObjectToSpan should logically be part of ReleaseToSpans. But 90// this triggers an optimization bug in gcc 4.5.0. 
Moving to a 91// separate function, and making sure that function isn't inlined, 92// seems to fix the problem. It also should be fixed for gcc 4.5.1. 93static 94#if __GNUC__ == 4 && __GNUC_MINOR__ == 5 && __GNUC_PATCHLEVEL__ == 0 95__attribute__ ((noinline)) 96#endif 97Span* MapObjectToSpan(void* object) { 98 const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift; 99 Span* span = Static::pageheap()->GetDescriptor(p); 100 return span; 101} 102 103void CentralFreeList::ReleaseToSpans(void* object) { 104 Span* span = MapObjectToSpan(object); 105 ASSERT(span != NULL); 106 ASSERT(span->refcount > 0); 107 108 // If span is empty, move it to non-empty list 109 if (span->objects == NULL) { 110 tcmalloc::DLL_Remove(span); 111 tcmalloc::DLL_Prepend(&nonempty_, span); 112 Event(span, 'N', 0); 113 } 114 115 // The following check is expensive, so it is disabled by default 116 if (false) { 117 // Check that object does not occur in list 118 int got = 0; 119 for (void* p = span->objects; p != NULL; p = FL_Next(p)){ 120 ASSERT(p != object); 121 got++; 122 } 123 ASSERT(got + span->refcount == 124 (span->length<<kPageShift) / 125 Static::sizemap()->ByteSizeForClass(span->sizeclass)); 126 } 127 128 counter_++; 129 span->refcount--; 130 if (span->refcount == 0) { 131 Event(span, '#', 0); 132 counter_ -= ((span->length<<kPageShift) / 133 Static::sizemap()->ByteSizeForClass(span->sizeclass)); 134 tcmalloc::DLL_Remove(span); 135 --num_spans_; 136 137 // Release central list lock while operating on pageheap 138 lock_.Unlock(); 139 { 140 SpinLockHolder h(Static::pageheap_lock()); 141 Static::pageheap()->Delete(span); 142 } 143 lock_.Lock(); 144 } else { 145 FL_Push(&(span->objects), object); 146 } 147} 148 149bool CentralFreeList::EvictRandomSizeClass( 150 int locked_size_class, bool force) { 151 static int race_counter = 0; 152 int t = race_counter++; // Updated without a lock, but who cares. 
153 if (t >= kNumClasses) { 154 while (t >= kNumClasses) { 155 t -= kNumClasses; 156 } 157 race_counter = t; 158 } 159 ASSERT(t >= 0); 160 ASSERT(t < kNumClasses); 161 if (t == locked_size_class) return false; 162 return Static::central_cache()[t].ShrinkCache(locked_size_class, force); 163} 164 165bool CentralFreeList::MakeCacheSpace() { 166 // Is there room in the cache? 167 if (used_slots_ < cache_size_) return true; 168 // Check if we can expand this cache? 169 if (cache_size_ == max_cache_size_) return false; 170 // Ok, we'll try to grab an entry from some other size class. 171 if (EvictRandomSizeClass(size_class_, false) || 172 EvictRandomSizeClass(size_class_, true)) { 173 // Succeeded in evicting, we're going to make our cache larger. 174 // However, we may have dropped and re-acquired the lock in 175 // EvictRandomSizeClass (via ShrinkCache and the LockInverter), so the 176 // cache_size may have changed. Therefore, check and verify that it is 177 // still OK to increase the cache_size. 178 if (cache_size_ < max_cache_size_) { 179 cache_size_++; 180 return true; 181 } 182 } 183 return false; 184} 185 186 187namespace { 188class LockInverter { 189 private: 190 SpinLock *held_, *temp_; 191 public: 192 inline explicit LockInverter(SpinLock* held, SpinLock *temp) 193 : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); } 194 inline ~LockInverter() { temp_->Unlock(); held_->Lock(); } 195}; 196} 197 198// This function is marked as NO_THREAD_SAFETY_ANALYSIS because it uses 199// LockInverter to release one lock and acquire another in scoped-lock 200// style, which our current annotation/analysis does not support. 201bool CentralFreeList::ShrinkCache(int locked_size_class, bool force) 202 NO_THREAD_SAFETY_ANALYSIS { 203 // Start with a quick check without taking a lock. 204 if (cache_size_ == 0) return false; 205 // We don't evict from a full cache unless we are 'forcing'. 
206 if (force == false && used_slots_ == cache_size_) return false; 207 208 // Grab lock, but first release the other lock held by this thread. We use 209 // the lock inverter to ensure that we never hold two size class locks 210 // concurrently. That can create a deadlock because there is no well 211 // defined nesting order. 212 LockInverter li(&Static::central_cache()[locked_size_class].lock_, &lock_); 213 ASSERT(used_slots_ <= cache_size_); 214 ASSERT(0 <= cache_size_); 215 if (cache_size_ == 0) return false; 216 if (used_slots_ == cache_size_) { 217 if (force == false) return false; 218 // ReleaseListToSpans releases the lock, so we have to make all the 219 // updates to the central list before calling it. 220 cache_size_--; 221 used_slots_--; 222 ReleaseListToSpans(tc_slots_[used_slots_].head); 223 return true; 224 } 225 cache_size_--; 226 return true; 227} 228 229void CentralFreeList::InsertRange(void *start, void *end, int N) { 230 SpinLockHolder h(&lock_); 231 if (N == Static::sizemap()->num_objects_to_move(size_class_) && 232 MakeCacheSpace()) { 233 int slot = used_slots_++; 234 ASSERT(slot >=0); 235 ASSERT(slot < max_cache_size_); 236 TCEntry *entry = &tc_slots_[slot]; 237 entry->head = start; 238 entry->tail = end; 239 return; 240 } 241 ReleaseListToSpans(start); 242} 243 244int CentralFreeList::RemoveRange(void **start, void **end, int N) { 245 ASSERT(N > 0); 246 lock_.Lock(); 247 if (N == Static::sizemap()->num_objects_to_move(size_class_) && 248 used_slots_ > 0) { 249 int slot = --used_slots_; 250 ASSERT(slot >= 0); 251 TCEntry *entry = &tc_slots_[slot]; 252 *start = entry->head; 253 *end = entry->tail; 254 lock_.Unlock(); 255 return N; 256 } 257 258 int result = 0; 259 void* head = NULL; 260 void* tail = NULL; 261 // TODO: Prefetch multiple TCEntries? 
262 tail = FetchFromSpansSafe(); 263 if (tail != NULL) { 264 FL_Push(&head, tail); 265 result = 1; 266 while (result < N) { 267 void *t = FetchFromSpans(); 268 if (!t) break; 269 FL_Push(&head, t); 270 result++; 271 } 272 } 273 lock_.Unlock(); 274 *start = head; 275 *end = tail; 276 return result; 277} 278 279 280void* CentralFreeList::FetchFromSpansSafe() { 281 void *t = FetchFromSpans(); 282 if (!t) { 283 Populate(); 284 t = FetchFromSpans(); 285 } 286 return t; 287} 288 289void* CentralFreeList::FetchFromSpans() { 290 if (tcmalloc::DLL_IsEmpty(&nonempty_)) return NULL; 291 Span* span = nonempty_.next; 292 293 ASSERT(span->objects != NULL); 294 span->refcount++; 295 void *result = FL_Pop(&(span->objects)); 296 if (span->objects == NULL) { 297 // Move to empty list 298 tcmalloc::DLL_Remove(span); 299 tcmalloc::DLL_Prepend(&empty_, span); 300 Event(span, 'E', 0); 301 } 302 counter_--; 303 return result; 304} 305 306// Fetch memory from the system and add to the central cache freelist. 307void CentralFreeList::Populate() { 308 // Release central list lock while operating on pageheap 309 lock_.Unlock(); 310 const size_t npages = Static::sizemap()->class_to_pages(size_class_); 311 312 Span* span; 313 { 314 SpinLockHolder h(Static::pageheap_lock()); 315 span = Static::pageheap()->New(npages); 316 if (span) Static::pageheap()->RegisterSizeClass(span, size_class_); 317 } 318 if (span == NULL) { 319 Log(kLog, __FILE__, __LINE__, 320 "tcmalloc: allocation failed", npages << kPageShift); 321 lock_.Lock(); 322 return; 323 } 324 ASSERT(span->length == npages); 325 // Cache sizeclass info eagerly. Locking is not necessary. 326 // (Instead of being eager, we could just replace any stale info 327 // about this span, but that seems to be no better in practice.) 
328 for (int i = 0; i < npages; i++) { 329 Static::pageheap()->CacheSizeClass(span->start + i, size_class_); 330 } 331 332 // Split the block into pieces and add to the free-list 333 // TODO: coloring of objects to avoid cache conflicts? 334 void* list = NULL; 335 char* ptr = reinterpret_cast<char*>(span->start << kPageShift); 336 char* limit = ptr + (npages << kPageShift); 337 const size_t size = Static::sizemap()->ByteSizeForClass(size_class_); 338 int num = 0; 339 while (ptr + size <= limit) { 340 FL_Push(&list, ptr); 341 ptr += size; 342 num++; 343 } 344 ASSERT(ptr <= limit); 345 span->objects = list; 346 span->refcount = 0; // No sub-object in use yet 347 348 // Add span to list of non-empty spans 349 lock_.Lock(); 350 tcmalloc::DLL_Prepend(&nonempty_, span); 351 ++num_spans_; 352 counter_ += num; 353} 354 355int CentralFreeList::tc_length() { 356 SpinLockHolder h(&lock_); 357 return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_); 358} 359 360size_t CentralFreeList::OverheadBytes() { 361 SpinLockHolder h(&lock_); 362 if (size_class_ == 0) { // 0 holds the 0-sized allocations 363 return 0; 364 } 365 const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_); 366 const size_t object_size = Static::sizemap()->class_to_size(size_class_); 367 ASSERT(object_size > 0); 368 const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size; 369 return num_spans_ * overhead_per_span; 370} 371 372} // namespace tcmalloc 373