// space_test.h revision 369810a98e6394b6dd162f5349e38a1f597b3bc7
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

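// Shared fixture for the MallocSpace implementations under test. It provides
// byte-array-backed allocation helpers and the common test bodies that the
// TEST_SPACE_CREATE_FN_* macros at the end of this file instantiate for a
// concrete space type.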
class SpaceTest : public CommonRuntimeTest {
 public:
  jobject byte_array_class_;

  SpaceTest() : byte_array_class_(nullptr) {
  }

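  // Register a space with the heap, revoking thread-local allocation buffers
  // first unless the caller has already done so. The heap takes ownership and
  // deletes the space when the runtime is cleaned up.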
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }

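  // Lazily resolve the byte array class ([B) and cache a JNI reference to it
  // so repeated allocations don't go through the class linker each time.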
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

  mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                        size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                  size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

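  // Turn a raw allocation into a valid byte array: install the class pointer
  // (and read barrier pointer where needed) and set the array length so that
  // SizeOf() agrees with the requested allocation size.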
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // As in proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

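  // Size of the header of a byte array, i.e. the smallest allocation that can
  // hold a valid zero-length byte array.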
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

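  // Factory signature that the concrete space types under test provide; the
  // test bodies below are parameterized on it.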
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size,
                                        size_t growth_limit, size_t capacity,
                                        uint8_t* requested_begin);
  void InitTestBody(CreateSpaceFn create_space);
  void ZygoteSpaceTestBody(CreateSpaceFn create_space);
  void AllocAndFreeTestBody(CreateSpaceFn create_space);
  void AllocAndFreeListTestBody(CreateSpaceFn create_space);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

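// Deterministic pseudo-random generator (the classic ANSI C linear
// congruential constants, as used by glibc's rand()), so the random-size test
// runs are reproducible.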
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
  // Some of the creations below intentionally fail and would spam error
  // messages into the log, so suppress everything below FATAL.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  {
    // Init < max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init == max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init > max == growth
    std::unique_ptr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Growth == init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Growth < init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Init < growth < max
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init < max < growth
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
}

// TODO: This test is not very good; we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
  size_t dummy;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);

  // Make sure that the zygote space isn't directly at the start of the space.
  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr);

  gc::Heap* heap = Runtime::Current()->GetHeap();
  space::Space* old_space = space;
  heap->RemoveSpace(old_space);
  heap->RevokeAllThreadLocalBuffers();
  space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
                                                              heap->IsLowMemoryMode(),
                                                              &space);
  delete old_space;
  // Add the zygote space.
  AddSpace(zygote_space, false);

  // Make the new alloc space findable to the heap; it will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space, false);

  // Succeeds, fits without adjusting the footprint limit.
  ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
  EXPECT_LE(2U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));

  // Final clean up.
  free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
  size_t dummy = 0;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the max allowed footprint.
  mirror::Object* lots_of_objects[1024];
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size;
    size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
    lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
                               &usable_size);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);

  // Succeeds, fits by adjusting the max allowed footprint.
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size;
    lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
}

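// Fills the space with objects, frees them in shrinking strides while trimming, and finally
// checks that the freed memory coalesces by making one large allocation. A positive object_size
// requests fixed-size objects; a negative object_size requests random sizes up to -object_size.
// Rounds <= 1 use Alloc, later rounds use AllocWithGrowth.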
void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                                    int round, size_t growth_limit) {
  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
    // No allocation can succeed.
    return;
  }

  // The space's footprint equals the amount of resources requested from the system.
  size_t footprint = space->GetFootprint();

  // The space must at least have its bookkeeping allocated.
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size.
  EXPECT_LE(footprint, growth_limit);

  // The space's size shouldn't exceed the initial size either.
  EXPECT_LE(space->Size(), growth_limit);

  // This invariant should always hold: otherwise the space has grown larger than it believes its
  // size to be, which breaks other invariants.
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit.
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail an allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // We should have filled the space.
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit.
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // Footprint and size should agree with the amount allocated.
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-ad-hoc manner: repeatedly trim, then free every
  // free_increment-th object and halve the increment.
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity check.
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects.
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here, before the large object allocation below. For RosAlloc,
  // revoke the thread-local runs, which are kept even when empty for performance reasons, so
  // that they won't cause the following large object allocation to fail due to potential
  // fragmentation. Note that they are normally revoked at each GC (but there is no GC here).
  space->RevokeAllThreadLocalBuffers();

  // All memory was released; try a large allocation to check that freed memory is being coalesced.
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up.
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check the footprint again.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

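// Runs the body three times: round 1 within the initial size (no growth), round 2 with growth up
// to the growth limit, and round 3 with the growth limit cleared, up to the full capacity.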
void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                      CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity checks.
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);

  // In this round we don't allocate with growth and therefore can't grow past the initial size.
  // This effectively makes the growth_limit the initial_size, so assert this.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove the growth limit.
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

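// The macros below stamp out the gtest cases for a concrete space type: spaceName becomes part
// of the test fixture name and spaceFn is the CreateSpaceFn factory used to create the space.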
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
  class spaceName##BaseTest : public SpaceTest { \
  }; \
  \
  TEST_F(spaceName##BaseTest, Init) { \
    InitTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, ZygoteSpace) { \
    ZygoteSpaceTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFree) { \
    AllocAndFreeTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
    AllocAndFreeListTestBody(spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)

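// A minimal usage sketch (hypothetical names, assuming a DlMallocSpace-style factory whose
// Create takes a trailing can_move_objects flag); a concrete space test would provide its own
// CreateSpaceFn and instantiate the macros:
//
//   MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size,
//                                    size_t growth_limit, size_t capacity,
//                                    uint8_t* requested_begin) {
//     return DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
//                                  requested_begin, /* can_move_objects */ false);
//   }
//   TEST_SPACE_CREATE_FN_BASE(DlMallocSpace, CreateDlMallocSpace)
//   TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
//   TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
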
}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_