space_test.h revision 28b1cf779b8c438b01b28a4adfeb22a4a8ebdb12
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

// Shared test harness for MallocSpace implementations. Super is the gtest
// fixture base class (the TEST_SPACE_CREATE_FN_* macros at the bottom of this
// file instantiate it with CommonRuntimeTest). All test objects are allocated
// as byte arrays so that the heap can walk/verify them like real objects.
template <class Super>
class SpaceTest : public Super {
 public:
  // Lazily-initialized JNI local reference to the "[B" (byte[]) class,
  // cached by GetByteArrayClass() and reused for every test allocation.
  jobject byte_array_class_ = nullptr;

  // Registers |space| with the current runtime's heap so allocations in it are
  // visible to heap walks. Thread-local allocation buffers are revoked first
  // (unless |revoke| is false) so per-thread state doesn't skew accounting.
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }

  // Resolves (once) and returns the byte-array class used to type all objects
  // allocated by this harness. Uses the boot class loader (null loader).
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      // Keep a JNI local ref so the class stays reachable across GC points.
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

  // Allocates |bytes| from |alloc_space| (without growing past the footprint
  // limit) and, on success, installs a byte-array class/length so the result
  // is a well-formed heap object. Returns nullptr on allocation failure.
  mirror::Object* Alloc(space::MallocSpace* alloc_space,
                        Thread* self,
                        size_t bytes,
                        size_t* bytes_allocated,
                        size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self,
                                             bytes,
                                             bytes_allocated,
                                             usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  // Same as Alloc() but allows the space to grow beyond its current footprint
  // limit (up to its capacity) to satisfy the request.
  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space,
                                  Thread* self,
                                  size_t bytes,
                                  size_t* bytes_allocated,
                                  size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  // Turns raw storage |o| of |size| bytes into a valid byte array: sets the
  // class, the read barrier state (when in use), and a length such that the
  // array's computed SizeOf() equals |size| exactly.
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    // Length is whatever remains after the array header, so SizeOf() == size.
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  // Size of the array header for byte arrays, i.e. the smallest allocation
  // that can be dressed up as a valid (zero-length) byte array.
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

  // Factory signature used by the test macros so one body can be run against
  // different MallocSpace implementations.
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
                                        size_t capacity, uint8_t* requested_begin);

  // Fills |space| with objects up to |growth_limit|, checking size/footprint
  // invariants throughout, then frees and trims. |object_size| > 0 means fixed
  // sizes; < 0 means random sizes bounded by its magnitude. Defined below.
  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

// Deterministic pseudo-random number generator (linear congruential, same
// multiplier/increment as the classic C library rand()); seeded per test so
// random-size runs are reproducible.
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space,
                                                           intptr_t object_size,
                                                           int round,
                                                           size_t growth_limit) {
  if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
      ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
    // No allocation can succeed
    return;
  }

  // The space's footprint equals amount of resources requested from system
  size_t footprint = space->GetFootprint();

  // The space must at least have its book keeping allocated
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size
  EXPECT_LE(footprint, growth_limit);

  // space's size shouldn't exceed the initial size
  EXPECT_LE(space->Size(), growth_limit);

  // this invariant should always hold or else the space has grown to be larger than what the
  // space believes its size is (which will break invariants)
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        // Negative object_size => random size in [header size, -object_size).
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      size_t bytes_tl_bulk_allocated;
      // Round 1 may not grow the footprint; later rounds are allowed to.
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
                            &bytes_tl_bulk_allocated));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        // Thread-local bulk allocations either didn't happen (0) or covered
        // at least this allocation.
        EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                    bytes_tl_bulk_allocated >= allocation_size);
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // footprint and size should agree with amount allocated
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-adhoc manner: free every free_increment-th
  // object, trim, halve the increment, and repeat until increment hits zero.
  size_t free_increment = 96;
  while (true) {
    {
      // Trim requires a runnable-to-native transition (it may touch the OS).
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here before allocating a large object
  // below. For RosAlloc, revoke thread-local runs, which are kept
  // even when empty for a performance reason, so that they won't
  // cause the following large object allocation to fail due to
  // potential fragmentation. Note they are normally revoked at each
  // GC (but no GC here.)
  space->RevokeAllThreadLocalBuffers();

  // All memory was released, try a large allocation to check freed memory is being coalesced
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  size_t bytes_tl_bulk_allocated;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
                              &bytes_tl_bulk_allocated));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr, &bytes_tl_bulk_allocated));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

// Creates a fresh space via |create_space| and runs the body three times:
// round 1 without growth (limit == initial_size), round 2 with growth up to
// growth_limit, round 3 with the growth limit cleared (up to capacity).
template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                             CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);

  // In this round we don't allocate with growth and therefore can't grow past the initial size.
  // This effectively makes the growth_limit the initial_size, so assert this.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove growth limit
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

// Expands to one gtest TEST_F per fixed allocation size.
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

// Expands to one gtest TEST_F per maximum random allocation size (the
// negative size selects the random-size path in the body).
#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

// Declares the fixed-size test fixture and instantiates the full sweep of
// allocation sizes (12 bytes through 8 MB) for a given space factory.
#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

// Declares the random-size test fixture and instantiates the sweep of maximum
// allocation sizes (16 bytes through 8 MB) for a given space factory.
#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_