space_test.h revision 624468cd401cc1ac0dd70c746301e0788a597759
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
18#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
19
20#include "zygote_space.h"
21
22#include <stdint.h>
23
24#include "common_runtime_test.h"
25#include "globals.h"
26#include "UniquePtr.h"
27#include "mirror/array-inl.h"
28#include "mirror/object-inl.h"
29
30namespace art {
31namespace gc {
32namespace space {
33
class SpaceTest : public CommonRuntimeTest {
 public:
  // Cached JNI reference to the byte array class ("[B"), resolved lazily by
  // GetByteArrayClass() and shared by the allocation helpers below.
  jobject byte_array_class_;

  SpaceTest() : byte_array_class_(nullptr) {
  }

  // Registers |space| with the heap so allocations and GC can find it; the
  // heap also takes ownership and deletes the space at runtime shutdown.
  void AddSpace(ContinuousSpace* space) {
    // For RosAlloc, revoke the thread local runs before moving onto a
    // new alloc space.
    Runtime::Current()->GetHeap()->RevokeAllThreadLocalBuffers();
    Runtime::Current()->GetHeap()->AddSpace(space);
  }

  // Returns the mirror class for "[B" (byte array), resolving and caching a
  // JNI reference on first use. NOTE(review): the cached handle comes from
  // NewLocalRef, so it presumably must not outlive the creating JNI frame —
  // confirm against the JNI local-reference lifetime rules.
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SirtRef<mirror::ClassLoader> null_loader(self, nullptr);
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

  // Allocates |bytes| from |alloc_space| without growing its footprint and,
  // on success, initializes the memory as a byte array so the GC can parse it.
  // Returns nullptr on failure.
  mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                        size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
    mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.get(), bytes);
    }
    return obj;
  }

  // Same as Alloc() but may grow the space's footprint to satisfy the request.
  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                  size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.get(), bytes);
    }
    return obj;
  }

  // Turns |size| raw bytes at |o| into a valid byte array of class
  // |byte_array_class| whose length fills the allocation exactly.
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBrooksReadBarrier) {
      o->SetReadBarrierPointer(o);
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    // The array length is whatever remains after the array header.
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  // Size of the smallest valid allocation: a byte array with zero elements
  // (i.e. just the array header).
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

  // Factory signature used to parameterize the shared test bodies over
  // concrete MallocSpace implementations (e.g. dlmalloc, rosalloc).
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
                                        size_t capacity, byte* requested_begin);
  // Shared test bodies; each TEST_F generated below delegates to one of these.
  void InitTestBody(CreateSpaceFn create_space);
  void ZygoteSpaceTestBody(CreateSpaceFn create_space);
  void AllocAndFreeTestBody(CreateSpaceFn create_space);
  void AllocAndFreeListTestBody(CreateSpaceFn create_space);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};
113
// Deterministic pseudo-random generator for the tests: a linear congruential
// step (glibc constants) that advances *seed in place and returns the new
// state. Unsigned arithmetic, so overflow wraps as intended.
static inline size_t test_rand(size_t* seed) {
  const size_t next = (*seed * 1103515245) + 12345;
  *seed = next;
  return next;
}
118
119void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
120  {
121    // Init < max == growth
122    UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
123    EXPECT_TRUE(space.get() != nullptr);
124  }
125  {
126    // Init == max == growth
127    UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
128    EXPECT_TRUE(space.get() != nullptr);
129  }
130  {
131    // Init > max == growth
132    UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
133    EXPECT_TRUE(space.get() == nullptr);
134  }
135  {
136    // Growth == init < max
137    UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
138    EXPECT_TRUE(space.get() != nullptr);
139  }
140  {
141    // Growth < init < max
142    UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
143    EXPECT_TRUE(space.get() == nullptr);
144  }
145  {
146    // Init < growth < max
147    UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
148    EXPECT_TRUE(space.get() != nullptr);
149  }
150  {
151    // Init < max < growth
152    UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
153    EXPECT_TRUE(space.get() == nullptr);
154  }
155}
156
// TODO: This test is not very good, we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
  size_t dummy;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size;
  SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
                                           &ptr1_usable_size));
  EXPECT_TRUE(ptr1.get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size;
  SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
                                                     &ptr3_usable_size));
  EXPECT_TRUE(ptr3.get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  // Free() reports back exactly the number of bytes that were allocated.
  EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size;
  SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
                                                     &ptr6_usable_size));
  EXPECT_TRUE(ptr6.get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
  space->Free(self, ptr1.reset(nullptr));
  EXPECT_LE(1U * MB, free1);

  // Make sure that the zygote space isn't directly at the start of the space.
  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr);

  gc::Heap* heap = Runtime::Current()->GetHeap();
  space::Space* old_space = space;
  // Detach the original malloc space before splitting it into a zygote space
  // plus a fresh allocation space.
  heap->RemoveSpace(old_space);
  // CreateZygoteSpace() turns the used prefix into the zygote space and hands
  // back the remainder through |&space| as a new alloc space.
  space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
                                                              heap->IsLowMemoryMode(),
                                                              &space);
  delete old_space;
  // Add the zygote space.
  AddSpace(zygote_space);

  // Make space findable to the heap, will also delete space when runtime is cleaned up
  AddSpace(space);

  // Succeeds, fits without adjusting the footprint limit.
  ptr1.reset(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
  EXPECT_TRUE(ptr1.get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  ptr3.reset(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
  EXPECT_TRUE(ptr3.get() != nullptr);
  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
  EXPECT_LE(2U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  space->Free(self, ptr3.reset(nullptr));

  // Final clean up.
  free1 = space->AllocationSize(ptr1.get(), nullptr);
  space->Free(self, ptr1.reset(nullptr));
  EXPECT_LE(1U * MB, free1);
}
261
262void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
263  size_t dummy = 0;
264  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
265  ASSERT_TRUE(space != nullptr);
266  Thread* self = Thread::Current();
267  ScopedObjectAccess soa(self);
268
269  // Make space findable to the heap, will also delete space when runtime is cleaned up
270  AddSpace(space);
271
272  // Succeeds, fits without adjusting the footprint limit.
273  size_t ptr1_bytes_allocated, ptr1_usable_size;
274  SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
275                                           &ptr1_usable_size));
276  EXPECT_TRUE(ptr1.get() != nullptr);
277  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
278  EXPECT_LE(1U * MB, ptr1_usable_size);
279  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
280
281  // Fails, requires a higher footprint limit.
282  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
283  EXPECT_TRUE(ptr2 == nullptr);
284
285  // Succeeds, adjusts the footprint.
286  size_t ptr3_bytes_allocated, ptr3_usable_size;
287  SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
288                                                     &ptr3_usable_size));
289  EXPECT_TRUE(ptr3.get() != nullptr);
290  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
291  EXPECT_LE(8U * MB, ptr3_usable_size);
292  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
293
294  // Fails, requires a higher footprint limit.
295  mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr);
296  EXPECT_TRUE(ptr4 == nullptr);
297
298  // Also fails, requires a higher allowed footprint.
299  mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr);
300  EXPECT_TRUE(ptr5 == nullptr);
301
302  // Release some memory.
303  size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
304  EXPECT_EQ(free3, ptr3_bytes_allocated);
305  space->Free(self, ptr3.reset(nullptr));
306  EXPECT_LE(8U * MB, free3);
307
308  // Succeeds, now that memory has been freed.
309  size_t ptr6_bytes_allocated, ptr6_usable_size;
310  SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
311                                                     &ptr6_usable_size));
312  EXPECT_TRUE(ptr6.get() != nullptr);
313  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
314  EXPECT_LE(9U * MB, ptr6_usable_size);
315  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
316
317  // Final clean up.
318  size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
319  space->Free(self, ptr1.reset(nullptr));
320  EXPECT_LE(1U * MB, free1);
321}
322
323void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
324  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
325  ASSERT_TRUE(space != nullptr);
326
327  // Make space findable to the heap, will also delete space when runtime is cleaned up
328  AddSpace(space);
329  Thread* self = Thread::Current();
330  ScopedObjectAccess soa(self);
331
332  // Succeeds, fits without adjusting the max allowed footprint.
333  mirror::Object* lots_of_objects[1024];
334  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
335    size_t allocation_size, usable_size;
336    size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
337    lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
338                               &usable_size);
339    EXPECT_TRUE(lots_of_objects[i] != nullptr);
340    SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
341    lots_of_objects[i] = obj.get();
342    size_t computed_usable_size;
343    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
344    EXPECT_EQ(usable_size, computed_usable_size);
345  }
346
347  // Release memory and check pointers are nullptr.
348  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
349  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
350    EXPECT_TRUE(lots_of_objects[i] == nullptr);
351  }
352
353  // Succeeds, fits by adjusting the max allowed footprint.
354  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
355    size_t allocation_size, usable_size;
356    lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
357    EXPECT_TRUE(lots_of_objects[i] != nullptr);
358    SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
359    lots_of_objects[i] = obj.get();
360    size_t computed_usable_size;
361    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
362    EXPECT_EQ(usable_size, computed_usable_size);
363  }
364
365  // Release memory and check pointers are nullptr
366  // TODO: This isn't compaction safe, fix.
367  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
368  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
369    EXPECT_TRUE(lots_of_objects[i] == nullptr);
370  }
371}
372
// Fills |space| with objects of |object_size| bytes (negative means random
// sizes up to |-object_size|), then frees them incrementally while trimming,
// and finally checks the freed memory coalesces into one large allocation.
// |round| <= 1 uses Alloc(); later rounds use AllocWithGrowth().
void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                                    int round, size_t growth_limit) {
  if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
      ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
    // No allocation can succeed
    return;
  }

  // The space's footprint equals amount of resources requested from system
  size_t footprint = space->GetFootprint();

  // The space must at least have its book keeping allocated
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size
  EXPECT_LE(footprint, growth_limit);

  // space's size shouldn't exceed the initial size
  EXPECT_LE(space->Size(), growth_limit);

  // this invariant should always hold or else the space has grown to be larger than what the
  // space believes its size is (which will break invariants)
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  UniquePtr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        // Negative object_size selects a random size in [header, -object_size).
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      SirtRef<mirror::Object> object(self, nullptr);
      size_t bytes_allocated = 0;
      if (round <= 1) {
        object.reset(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
      } else {
        object.reset(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.get();
        size_t allocation_size = space->AllocationSize(object.get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        amount_allocated += allocation_size;
        break;
      }
    }
    // Exhausted the retry budget: the space is full at index i.
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // footprint and size should agree with amount allocated
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-adhoc manner: free every free_increment-th
  // object, trim, and halve the increment until everything is freed.
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    // The previous pass had increment 1 and freed every remaining object.
    if (free_increment == 0) {
      break;
    }

    // Free some objects
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here before allocating a large object
  // below. For RosAlloc, revoke thread-local runs, which are kept
  // even when empty for a performance reason, so that they won't
  // cause the following large object allocation to fail due to
  // potential fragmentation. Note they are normally revoked at each
  // GC (but no GC here.)
  space->RevokeAllThreadLocalBuffers();

  // All memory was released, try a large allocation to check freed memory is being coalesced
  SirtRef<mirror::Object> large_object(self, nullptr);
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  if (round <= 1) {
    large_object.reset(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
  } else {
    large_object.reset(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                       nullptr));
  }
  EXPECT_TRUE(large_object.get() != nullptr);

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up
  space->Free(self, large_object.reset(nullptr));

  // Sanity check footprint
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}
537
538void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space) {
539  if (object_size < SizeOfZeroLengthByteArray()) {
540    // Too small for the object layout/model.
541    return;
542  }
543  size_t initial_size = 4 * MB;
544  size_t growth_limit = 8 * MB;
545  size_t capacity = 16 * MB;
546  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
547  ASSERT_TRUE(space != nullptr);
548
549  // Basic sanity
550  EXPECT_EQ(space->Capacity(), growth_limit);
551  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
552
553  // Make space findable to the heap, will also delete space when runtime is cleaned up
554  AddSpace(space);
555
556  // In this round we don't allocate with growth and therefore can't grow past the initial size.
557  // This effectively makes the growth_limit the initial_size, so assert this.
558  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
559  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
560  // Remove growth limit
561  space->ClearGrowthLimit();
562  EXPECT_EQ(space->Capacity(), capacity);
563  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
564}
565
// Instantiates a footprint/growth/trim test using fixed-size allocations of
// |size| bytes on the suite named <spaceName>StaticTest.
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

// Same, but with random allocation sizes: the driver receives -size, which
// the test body interprets as "random sizes up to |size|".
#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

// Declares the basic test suite (init, zygote, alloc/free, free-list) for a
// concrete space implementation created by |spaceFn|.
#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
  class spaceName##BaseTest : public SpaceTest { \
  }; \
  \
  TEST_F(spaceName##BaseTest, Init) { \
    InitTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, ZygoteSpace) { \
    ZygoteSpaceTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFree) { \
    AllocAndFreeTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
    AllocAndFreeListTestBody(spaceFn); \
  }

// Declares the fixed-size footprint/growth/trim tests across a range of
// allocation sizes (12 B up to 8 MB).
#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

// Declares the random-size footprint/growth/trim tests across a range of
// maximum allocation sizes (16 B up to 8 MB).
#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
623
624}  // namespace space
625}  // namespace gc
626}  // namespace art
627
628#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
629