/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

// Mixin test fixture for MallocSpace implementations. Super is the base test
// class (e.g. CommonRuntimeTest, see the TEST_SPACE_CREATE_FN_* macros at the
// bottom of this file). Provides helpers that allocate well-formed byte[]
// objects inside a space, plus a size/footprint/growth-limit/trim stress test
// shared by the macro-generated test cases.
template <class Super>
class SpaceTest : public Super {
 public:
  // Lazily-initialized JNI reference to the byte[] class ("[B"), cached by
  // GetByteArrayClass(). NOTE(review): this is a *local* reference
  // (NewLocalRef) stored in a field — it assumes the creating thread's local
  // reference frame outlives all uses in the test; confirm before reusing
  // this pattern elsewhere.
  jobject byte_array_class_ = nullptr;

  // Registers |space| with the running heap so the test allocations are
  // visible to it, and makes it the default allocation space.
  // |revoke|: when true, revoke all thread-local allocation buffers first
  // (presumably so no thread holds stale TLAB state while the space set is
  // mutated — TODO confirm against Heap::AddSpace requirements).
  // Adding a space requires all threads suspended; the current thread moves
  // itself to kSuspended before taking ScopedSuspendAll so it is not counted
  // as a runnable thread during the suspension.
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    {
      ScopedThreadStateChange sts(Thread::Current(), kSuspended);
      ScopedSuspendAll ssa("Add image space");
      heap->AddSpace(space);
    }
    heap->SetSpaceAsDefault(space);
  }

  // Returns the mirror::Class for byte[], resolving it with the boot class
  // loader (null loader) and caching the result in byte_array_class_ on the
  // first call.
  mirror::Class* GetByteArrayClass(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      // Keep a JNI reference so the class can be re-decoded on later calls.
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return self->DecodeJObject(byte_array_class_)->AsClass();
  }

  // Allocates |bytes| from |alloc_space| and, on success, installs the byte[]
  // class and a length so the result parses as a valid array object.
  // The out-parameters mirror MallocSpace::Alloc. Returns nullptr on failure.
  mirror::Object* Alloc(space::MallocSpace* alloc_space,
                        Thread* self,
                        size_t bytes,
                        size_t* bytes_allocated,
                        size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    // Hold the class in a handle so it stays valid if the allocation below
    // suspends the thread (e.g. for a GC).
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self,
                                             bytes,
                                             bytes_allocated,
                                             usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  // Same as Alloc() but uses AllocWithGrowth, which may grow the space past
  // its current footprint (up to the growth limit).
  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space,
                                  Thread* self,
                                  size_t bytes,
                                  size_t* bytes_allocated,
                                  size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  // Turns the raw |size|-byte allocation |o| into a well-formed byte[]:
  // installs |byte_array_class| and sets the array length so that
  // SizeOf() == |size|. Verifies the read barrier state when Baker read
  // barriers are in use.
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier state.
      o->AssertReadBarrierState();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    // Remaining bytes after the header become the element payload.
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  // Size of the byte[] header, i.e. the size of a zero-length byte array —
  // the smallest allocation InstallClass() can handle.
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

  // Factory signature used by the macro-generated tests to create the
  // MallocSpace under test.
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
                                        size_t capacity, uint8_t* requested_begin);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

// Deterministic linear congruential generator so test runs are reproducible.
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

// Fills |space| with objects up to |growth_limit|, checking size/footprint
// invariants throughout, then frees them while trimming, and finally checks
// that the freed memory coalesces by making one large allocation.
// |object_size| > 0: fixed-size allocations; < 0: random sizes in
// [header, -object_size). |round| selects Alloc (<= 1) vs AllocWithGrowth.
template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space,
                                                           intptr_t object_size,
                                                           int round,
                                                           size_t growth_limit) {
  if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
      ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
    // No allocation can succeed
    return;
  }

  // The space's footprint equals amount of resources requested from system
  size_t footprint = space->GetFootprint();

  // The space must at least have its book keeping allocated
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size
  EXPECT_LE(footprint, growth_limit);

  // space's size shouldn't exceed the initial size
  EXPECT_LE(space->Size(), growth_limit);

  // this invariant should always hold or else the space has grown to be larger than what the
  // space believes its size is (which will break invariants)
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit.
  // For random sizes (object_size < 0) assume a conservative 8-byte average
  // when sizing the bookkeeping array.
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        // Random size; retried (up to max_fails) so a too-large draw does not
        // end the fill prematurely.
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      size_t bytes_tl_bulk_allocated;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
                            &bytes_tl_bulk_allocated));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        // Zero means the allocation was not thread-local-bulk; otherwise the
        // bulk amount must cover at least this allocation.
        EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                    bytes_tl_bulk_allocated >= allocation_size);
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // footprint and size should agree with amount allocated
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-adhoc manner: free every free_increment-th
  // object, trim, halve the increment, repeat until increment reaches 0
  // (the final iteration trims once more and just re-checks bounds).
  size_t free_increment = 96;
  while (true) {
    {
      // Trim requires a native (non-runnable) thread state.
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        // Already freed in an earlier (larger-increment) pass.
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here before allocating a large object
  // below. For RosAlloc, revoke thread-local runs, which are kept
  // even when empty for a performance reason, so that they won't
  // cause the following large object allocation to fail due to
  // potential fragmentation. Note they are normally revoked at each
  // GC (but no GC here.)
  space->RevokeAllThreadLocalBuffers();

  // All memory was released, try a large allocation to check freed memory is being coalesced
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  size_t bytes_tl_bulk_allocated;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
                              &bytes_tl_bulk_allocated));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr, &bytes_tl_bulk_allocated));
  }
  EXPECT_TRUE(large_object != nullptr);

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

// Creates a 4MB/8MB/16MB (initial/growth-limit/capacity) space via
// |create_space|, registers it with the heap, and runs the body test three
// times: without growth (round 1, capped at initial_size), with growth
// (round 2, capped at growth_limit), and with the growth limit cleared
// (round 3, capped at capacity).
template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                             CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);

  // In this round we don't allocate with growth and therefore can't grow past the initial size.
  // This effectively makes the growth_limit the initial_size, so assert this.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove growth limit
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

// Generates a fixed-size-allocation test case named after |name| for the
// given space factory.
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

// Generates a random-size-allocation test case; the negated size is the
// exclusive upper bound for the random allocation sizes.
#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-(size), spaceFn); \
  }

// Instantiates the fixture and the full suite of fixed-size test cases
// (12B up to 8MB) for a space implementation.
#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

// Instantiates the fixture and the full suite of random-size test cases
// (bounds 16B up to 8MB) for a space implementation.
#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_