/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

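// Common test fixture for MallocSpace implementations. It provides allocation
// helpers that install the byte array class into raw allocations (so the
// resulting objects are parsable by the runtime) and the reusable test bodies
// that the macros at the end of this header stamp out into gtest suites.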
class SpaceTest : public CommonRuntimeTest {
 public:
  jobject byte_array_class_;

  SpaceTest() : byte_array_class_(nullptr) {
  }

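  // Registers |space| with the heap and makes it the default allocation space.
  // Thread-local buffers are revoked first (unless |revoke| is false, e.g.
  // because the caller already revoked them) so no thread keeps allocating
  // into stale thread-local buffers.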
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }

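  // Returns the byte array class ("[B"), resolving it on first use and caching
  // it in |byte_array_class_| as a JNI reference so later lookups are cheap.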
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

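  // Allocates |bytes| from |alloc_space| and installs the byte array class so
  // the result is a well-formed object. Returns nullptr on failure, like
  // MallocSpace::Alloc itself.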
  mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                        size_t* bytes_allocated, size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

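  // Same as Alloc() above, but may raise the space's footprint limit to
  // satisfy the request.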
  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                  size_t* bytes_allocated, size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

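  // Turns a raw allocation of |size| bytes into a valid byte array: sets the
  // class, the read barrier state if in use, and a length such that the
  // array's computed size matches |size| exactly.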
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // As in proper heap object allocation, install and verify the correct
      // read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size,
                                        size_t growth_limit, size_t capacity,
                                        uint8_t* requested_begin);
  void InitTestBody(CreateSpaceFn create_space);
  void ZygoteSpaceTestBody(CreateSpaceFn create_space);
  void AllocAndFreeTestBody(CreateSpaceFn create_space);
  void AllocAndFreeListTestBody(CreateSpaceFn create_space);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

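// Simple linear congruential generator (the classic ANSI C rand() constants);
// deterministic, so test runs are reproducible.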
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

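// Exercises space creation across orderings of initial size, growth limit, and
// capacity ("max" in the comments below): creation must fail whenever
// init > growth or growth > max.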
void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
  // The failing creations below would lead to error messages in the log; raise the minimum
  // severity to suppress them.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  {
    // Init < max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init == max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init > max == growth
    std::unique_ptr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Growth == init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Growth < init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Init < growth < max
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init < max < growth
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
}

// TODO: This test is not very good; we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
  size_t dummy;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; the space will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                         &ptr1_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                                   &ptr3_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
                                   &ptr6_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
  EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);

  // Make sure that the zygote space isn't directly at the start of the space.
  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr, &dummy) != nullptr);

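  // Split the space: everything allocated so far becomes a ZygoteSpace, and
  // CreateZygoteSpace() repoints |space| at a fresh MallocSpace for
  // post-zygote allocations. The old space wrapper is removed from the heap
  // and deleted; its underlying memory lives on in the two new spaces.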
  gc::Heap* heap = Runtime::Current()->GetHeap();
  space::Space* old_space = space;
  heap->RemoveSpace(old_space);
  heap->RevokeAllThreadLocalBuffers();
  space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
                                                              heap->IsLowMemoryMode(),
                                                              &space);
  delete old_space;
  // Add the zygote space.
  AddSpace(zygote_space, false);

  // Make the new space findable to the heap; the space will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space, false);

  // Succeeds, fits without adjusting the footprint limit.
  ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                    &ptr1_bytes_tl_bulk_allocated));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                              &ptr3_bytes_tl_bulk_allocated));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
  EXPECT_LE(2U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));

  // Final clean up.
  free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
  size_t dummy = 0;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Make the space findable to the heap; the space will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
  StackHandleScope<3> hs(soa.Self());
  MutableHandle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
                         &ptr1_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
  EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
  MutableHandle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
                                   &ptr3_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr, &dummy);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
                                   &ptr6_bytes_tl_bulk_allocated)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
  EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; the space will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the max allowed footprint.
  mirror::Object* lots_of_objects[1024];
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
    size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
    lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
                               &usable_size, &bytes_tl_bulk_allocated);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
    EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                bytes_tl_bulk_allocated >= allocation_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);

  // Succeeds, fits by adjusting the max allowed footprint.
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
    lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size,
                                         &bytes_tl_bulk_allocated);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
    EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                bytes_tl_bulk_allocated >= allocation_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
}

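// Fills |space| with objects up to |growth_limit|, then frees them in waves
// while trimming, and finally checks that the freed memory coalesces into one
// large allocation. A positive |object_size| means fixed-size allocations; a
// negative one means random sizes up to -object_size (clamped to the minimum
// byte array size). |round| <= 1 uses Alloc(), later rounds AllocWithGrowth().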
void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                                    int round, size_t growth_limit) {
  if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
      ((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
    // No allocation can succeed.
    return;
  }

  // The space's footprint equals the amount of resources requested from the system.
  size_t footprint = space->GetFootprint();

  // The space must at least have its bookkeeping allocated.
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the growth limit.
  EXPECT_LE(footprint, growth_limit);

  // The space's size shouldn't exceed the growth limit either.
  EXPECT_LE(space->Size(), growth_limit);

  // This invariant should always hold: otherwise the footprint has grown beyond what the space
  // believes its size to be (which would break other invariants).
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit.
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      size_t bytes_tl_bulk_allocated;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
                            &bytes_tl_bulk_allocated));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                    bytes_tl_bulk_allocated >= allocation_size);
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth limit.
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // The footprint and size should agree with the amount allocated.
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-ad-hoc manner.
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity.
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects.
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }


  // The space has become empty here, before the large-object allocation below. For RosAlloc,
  // revoke the thread-local runs, which are kept around even when empty for performance reasons,
  // so that they won't cause the following large allocation to fail due to potential
  // fragmentation. Note that they are normally revoked at each GC (but there is no GC here).
  space->RevokeAllThreadLocalBuffers();

  // All memory was released; try a large allocation to check that freed memory is being coalesced.
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  size_t bytes_tl_bulk_allocated;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
                              &bytes_tl_bulk_allocated));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr, &bytes_tl_bulk_allocated));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up.
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

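// Runs the body three times: round 1 without growth (capped at initial_size), round 2 with
// growth up to growth_limit, and round 3 at full capacity after clearing the growth limit.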
void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                      CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity.
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make the space findable to the heap; the space will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space);

  // In this round we don't allocate with growth and therefore can't grow past the initial size,
  // which effectively makes initial_size the growth limit, so pass it as such.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove the growth limit.
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

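// The macros below stamp out gtest suites for a concrete space factory: the Base suite covers
// init/zygote/alloc-and-free behavior, the Static suite runs fixed-size allocation rounds, and
// the Random suite runs randomized-size rounds (note the negated size argument).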
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
  class spaceName##BaseTest : public SpaceTest { \
  }; \
  \
  TEST_F(spaceName##BaseTest, Init) { \
    InitTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, ZygoteSpace) { \
    ZygoteSpaceTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFree) { \
    AllocAndFreeTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
    AllocAndFreeListTestBody(spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
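
// A minimal usage sketch (assumed names; the real factory and its signature live in the concrete
// test file): a test supplies a factory matching CreateSpaceFn and instantiates the suites.
//
//   MallocSpace* CreateMySpace(const std::string& name, size_t initial_size, size_t growth_limit,
//                              size_t capacity, uint8_t* requested_begin) {
//     return MySpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
//   }
//   TEST_SPACE_CREATE_FN_BASE(MySpace, CreateMySpace)
//   TEST_SPACE_CREATE_FN_STATIC(MySpace, CreateMySpace)
//   TEST_SPACE_CREATE_FN_RANDOM(MySpace, CreateMySpace)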

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_