/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

class SpaceTest : public CommonRuntimeTest {
 public:
  jobject byte_array_class_;

  SpaceTest() : byte_array_class_(nullptr) {
  }
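
  // Registers the space with the heap and makes it the default allocation
  // space. Thread-local allocation buffers are revoked first (unless the
  // caller opts out) so per-thread allocation state stays consistent.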
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }
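
  // Lazily resolves the byte array class ("[B") and caches it in a JNI
  // reference so repeated test allocations don't re-resolve it.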
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }
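
  // Allocates raw storage from the space and installs the byte array class,
  // so the returned object is well-formed and parsable by the GC.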
  mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                        size_t* bytes_allocated, size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                  size_t* bytes_allocated, size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }
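
  // Dresses a raw allocation up as a byte array whose length accounts for the
  // full requested size, and verifies the read barrier state expected of a
  // properly allocated heap object.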
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }
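
  // The byte array header size, i.e. the smallest allocation InstallClass can
  // turn into a valid object.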
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size,
                                        size_t growth_limit, size_t capacity,
                                        uint8_t* requested_begin);
  void InitTestBody(CreateSpaceFn create_space);
  void ZygoteSpaceTestBody(CreateSpaceFn create_space);
  void AllocAndFreeTestBody(CreateSpaceFn create_space);
  void AllocAndFreeListTestBody(CreateSpaceFn create_space);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};
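
// A deterministic linear congruential generator (the classic C-library
// multiplier/increment), so the randomized size tests are reproducible.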
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}
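
// Space creation should succeed exactly when initial size <= growth limit <= capacity;
// each block below probes one ordering of the three sizes.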
void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
  // The invalid creation attempts below would lead to error messages in the log; suppress them.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  {
    // Init < max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init == max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init > max == growth
    std::unique_ptr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Growth == init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Growth < init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Init < growth < max
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init < max < growth
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
}

// TODO: This test is not very good; we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
  size_t dummy;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                         &ptr1_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                                   &ptr3_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
                                   &ptr6_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
  EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);

  // Make sure that the zygote space isn't directly at the start of the space.
  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr, &dummy) != nullptr);

  gc::Heap* heap = Runtime::Current()->GetHeap();
  space::Space* old_space = space;
  heap->RemoveSpace(old_space);
  heap->RevokeAllThreadLocalBuffers();
  space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
                                                              heap->IsLowMemoryMode(),
                                                              &space);
  delete old_space;
  // Add the zygote space.
  AddSpace(zygote_space, false);

  // Make the new alloc space findable to the heap; it will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space, false);

  // Succeeds, fits without adjusting the footprint limit.
  ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                    &ptr1_bytes_tl_bulk_allocated));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                              &ptr3_bytes_tl_bulk_allocated));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
  EXPECT_LE(2U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));

  // Final clean up.
  free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
  size_t dummy = 0;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                         &ptr1_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                                   &ptr3_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
                                   &ptr6_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
  EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the max allowed footprint.
  mirror::Object* lots_of_objects[1024];
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
    size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
    lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
                               &usable_size, &bytes_tl_bulk_allocated);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
    EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                bytes_tl_bulk_allocated >= allocation_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);

  // Succeeds, fits by adjusting the max allowed footprint.
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
    lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size,
                                         &bytes_tl_bulk_allocated);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
    EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                bytes_tl_bulk_allocated >= allocation_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
}
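
// Fills the space up to growth_limit, then trims and frees in stages, checking
// footprint/size invariants throughout. A positive object_size means every
// allocation is exactly that many bytes; a negative one means sizes are drawn
// pseudo-randomly from [size of a zero-length byte array, -object_size).
// Rounds above 1 allocate with growth.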
void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                                    int round, size_t growth_limit) {
  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
    // No allocation can succeed.
    return;
  }

  // The space's footprint equals the amount of resources requested from the system.
  size_t footprint = space->GetFootprint();

  // The space must at least have its bookkeeping allocated.
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size.
  EXPECT_LE(footprint, growth_limit);

  // The space's size shouldn't exceed the initial size either.
  EXPECT_LE(space->Size(), growth_limit);

  // This invariant should always hold: otherwise the space has grown larger than what it
  // believes its size to be, which breaks other invariants.
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit.
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      size_t bytes_tl_bulk_allocated;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
                            &bytes_tl_bulk_allocated));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                    bytes_tl_bulk_allocated >= allocation_size);
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit.
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // The footprint and size should agree with the amount allocated.
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-ad-hoc manner.
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity checks.
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects.
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here before allocating a large object
  // below. For RosAlloc, revoke thread-local runs, which are kept
  // even when empty for performance reasons, so that they won't
  // cause the following large object allocation to fail due to
  // potential fragmentation. Note that they are normally revoked at
  // each GC (but no GC happens here).
  space->RevokeAllThreadLocalBuffers();

  // All memory was released; try a large allocation to check that freed memory is coalesced.
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  size_t bytes_tl_bulk_allocated;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
                              &bytes_tl_bulk_allocated));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr, &bytes_tl_bulk_allocated));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up.
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check the footprint again.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                      CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity.
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
  AddSpace(space);

  // In round 1 we don't allocate with growth and therefore can't grow past the initial size.
  // This effectively makes initial_size the growth limit, so pass it as such.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove the growth limit.
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}
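
// The macros below stamp out gtest fixtures and tests for a concrete space
// implementation, parameterized by a factory function matching CreateSpaceFn.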
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
  class spaceName##BaseTest : public SpaceTest { \
  }; \
  \
  TEST_F(spaceName##BaseTest, Init) { \
    InitTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, ZygoteSpace) { \
    ZygoteSpaceTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFree) { \
    AllocAndFreeTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
    AllocAndFreeListTestBody(spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
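
// Example wiring from a hypothetical test file (the factory shown is an
// assumption for illustration; any function matching CreateSpaceFn works):
//
//   MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size,
//                                    size_t growth_limit, size_t capacity,
//                                    uint8_t* requested_begin);  // assumed factory
//
//   TEST_SPACE_CREATE_FN_BASE(DlMallocSpace, CreateDlMallocSpace)
//   TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
//   TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)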

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_