// mem_map_test.cc — revision 3c3c4a1da1e8c03e78813d175a9974fb9f1097ea
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <memory>

#include "common_runtime_test.h"
#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"

namespace art {

27class MemMapTest : public CommonRuntimeTest {
28 public:
29  static uint8_t* BaseBegin(MemMap* mem_map) {
30    return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
31  }
32
33  static size_t BaseSize(MemMap* mem_map) {
34    return mem_map->base_size_;
35  }
36
37  static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
38    // Find a valid map address and unmap it before returning.
39    std::string error_msg;
40    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
41                                                     nullptr,
42                                                     size,
43                                                     PROT_READ,
44                                                     low_4gb,
45                                                     false,
46                                                     &error_msg));
47    CHECK(map != nullptr);
48    return map->Begin();
49  }
50
51  static void RemapAtEndTest(bool low_4gb) {
52    std::string error_msg;
53    // Cast the page size to size_t.
54    const size_t page_size = static_cast<size_t>(kPageSize);
55    // Map a two-page memory region.
56    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
57                                      nullptr,
58                                      2 * page_size,
59                                      PROT_READ | PROT_WRITE,
60                                      low_4gb,
61                                      false,
62                                      &error_msg);
63    // Check its state and write to it.
64    uint8_t* base0 = m0->Begin();
65    ASSERT_TRUE(base0 != nullptr) << error_msg;
66    size_t size0 = m0->Size();
67    EXPECT_EQ(m0->Size(), 2 * page_size);
68    EXPECT_EQ(BaseBegin(m0), base0);
69    EXPECT_EQ(BaseSize(m0), size0);
70    memset(base0, 42, 2 * page_size);
71    // Remap the latter half into a second MemMap.
72    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
73                                "MemMapTest_RemapAtEndTest_map1",
74                                PROT_READ | PROT_WRITE,
75                                &error_msg);
76    // Check the states of the two maps.
77    EXPECT_EQ(m0->Begin(), base0) << error_msg;
78    EXPECT_EQ(m0->Size(), page_size);
79    EXPECT_EQ(BaseBegin(m0), base0);
80    EXPECT_EQ(BaseSize(m0), page_size);
81    uint8_t* base1 = m1->Begin();
82    size_t size1 = m1->Size();
83    EXPECT_EQ(base1, base0 + page_size);
84    EXPECT_EQ(size1, page_size);
85    EXPECT_EQ(BaseBegin(m1), base1);
86    EXPECT_EQ(BaseSize(m1), size1);
87    // Write to the second region.
88    memset(base1, 43, page_size);
89    // Check the contents of the two regions.
90    for (size_t i = 0; i < page_size; ++i) {
91      EXPECT_EQ(base0[i], 42);
92    }
93    for (size_t i = 0; i < page_size; ++i) {
94      EXPECT_EQ(base1[i], 43);
95    }
96    // Unmap the first region.
97    delete m0;
98    // Make sure the second region is still accessible after the first
99    // region is unmapped.
100    for (size_t i = 0; i < page_size; ++i) {
101      EXPECT_EQ(base1[i], 43);
102    }
103    delete m1;
104  }
105
106  void CommonInit() {
107    MemMap::Init();
108  }
109
110#if defined(__LP64__) && !defined(__x86_64__)
111  static uintptr_t GetLinearScanPos() {
112    return MemMap::next_mem_pos_;
113  }
114#endif
115};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

123TEST_F(MemMapTest, Start) {
124  CommonInit();
125  uintptr_t start = GetLinearScanPos();
126  EXPECT_LE(64 * KB, start);
127  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
128#ifdef __BIONIC__
129  // Test a couple of values. Make sure they are different.
130  uintptr_t last = 0;
131  for (size_t i = 0; i < 100; ++i) {
132    uintptr_t random_start = CreateStartPos(i * kPageSize);
133    EXPECT_NE(last, random_start);
134    last = random_start;
135  }
136
137  // Even on max, should be below ART_BASE_ADDRESS.
138  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
139#endif
140  // End of test.
141}
#endif

144TEST_F(MemMapTest, MapAnonymousEmpty) {
145  CommonInit();
146  std::string error_msg;
147  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
148                                                   nullptr,
149                                                   0,
150                                                   PROT_READ,
151                                                   false,
152                                                   false,
153                                                   &error_msg));
154  ASSERT_TRUE(map.get() != nullptr) << error_msg;
155  ASSERT_TRUE(error_msg.empty());
156  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
157                                 nullptr,
158                                 kPageSize,
159                                 PROT_READ | PROT_WRITE,
160                                 false,
161                                 false,
162                                 &error_msg));
163  ASSERT_TRUE(map.get() != nullptr) << error_msg;
164  ASSERT_TRUE(error_msg.empty());
165}
166
167TEST_F(MemMapTest, MapAnonymousFailNullError) {
168  CommonInit();
169  // Test that we don't crash with a null error_str when mapping at an invalid location.
170  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
171                                                    reinterpret_cast<uint8_t*>(kPageSize),
172                                                    0x20000,
173                                                    PROT_READ | PROT_WRITE,
174                                                    false,
175                                                    false,
176                                                    nullptr));
177  ASSERT_EQ(nullptr, map.get());
178}

#ifdef __LP64__
181TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
182  CommonInit();
183  std::string error_msg;
184  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
185                                                   nullptr,
186                                                   kPageSize,
187                                                   PROT_READ | PROT_WRITE,
188                                                   true,
189                                                   false,
190                                                   &error_msg));
191  ASSERT_TRUE(map.get() != nullptr) << error_msg;
192  ASSERT_TRUE(error_msg.empty());
193  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
194}
// Verifies that a file-backed mapping requested with low_4gb == true lands
// below the 4GB boundary and has the requested size.
TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  // Write one page of zero-initialized bytes so the file is large enough to
  // back a one-page mapping.
  constexpr size_t kMapSize = kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  // Map the file privately, read-only, constrained to the low 4GB.
  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
                                              PROT_READ,
                                              MAP_PRIVATE,
                                              scratch_file.GetFd(),
                                              /*start*/0,
                                              /*low_4gb*/true,
                                              scratch_file.GetFilename().c_str(),
                                              &error_msg));
  ASSERT_TRUE(map != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map->Size(), kMapSize);
  // The base address must fit in 32 bits.
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
#endif

217TEST_F(MemMapTest, MapAnonymousExactAddr) {
218  CommonInit();
219  std::string error_msg;
220  // Find a valid address.
221  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
222  // Map at an address that should work, which should succeed.
223  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
224                                                    valid_address,
225                                                    kPageSize,
226                                                    PROT_READ | PROT_WRITE,
227                                                    false,
228                                                    false,
229                                                    &error_msg));
230  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
231  ASSERT_TRUE(error_msg.empty());
232  ASSERT_TRUE(map0->BaseBegin() == valid_address);
233  // Map at an unspecified address, which should succeed.
234  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
235                                                    nullptr,
236                                                    kPageSize,
237                                                    PROT_READ | PROT_WRITE,
238                                                    false,
239                                                    false,
240                                                    &error_msg));
241  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
242  ASSERT_TRUE(error_msg.empty());
243  ASSERT_TRUE(map1->BaseBegin() != nullptr);
244  // Attempt to map at the same address, which should fail.
245  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
246                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
247                                                    kPageSize,
248                                                    PROT_READ | PROT_WRITE,
249                                                    false,
250                                                    false,
251                                                    &error_msg));
252  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
253  ASSERT_TRUE(!error_msg.empty());
254}
255
// Exercises RemapAtEnd with a normal (not low-4GB) mapping.
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
// Exercises RemapAtEnd with a mapping restricted to the low 4GB.
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

266TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
267  // Some MIPS32 hardware (namely the Creator Ci20 development board)
268  // cannot allocate in the 2GB-4GB region.
269  TEST_DISABLED_FOR_MIPS();
270
271  CommonInit();
272  // This test may not work under valgrind.
273  if (RUNNING_ON_MEMORY_TOOL == 0) {
274    constexpr size_t size = 0x100000;
275    // Try all addresses starting from 2GB to 4GB.
276    size_t start_addr = 2 * GB;
277    std::string error_msg;
278    std::unique_ptr<MemMap> map;
279    for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
280      map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
281                                     reinterpret_cast<uint8_t*>(start_addr),
282                                     size,
283                                     PROT_READ | PROT_WRITE,
284                                     /*low_4gb*/true,
285                                     false,
286                                     &error_msg));
287      if (map != nullptr) {
288        break;
289      }
290    }
291    ASSERT_TRUE(map.get() != nullptr) << error_msg;
292    ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
293    ASSERT_TRUE(error_msg.empty());
294    ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
295  }
296}
297
298TEST_F(MemMapTest, MapAnonymousOverflow) {
299  CommonInit();
300  std::string error_msg;
301  uintptr_t ptr = 0;
302  ptr -= kPageSize;  // Now it's close to the top.
303  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
304                                                   reinterpret_cast<uint8_t*>(ptr),
305                                                   2 * kPageSize,  // brings it over the top.
306                                                   PROT_READ | PROT_WRITE,
307                                                   false,
308                                                   false,
309                                                   &error_msg));
310  ASSERT_EQ(nullptr, map.get());
311  ASSERT_FALSE(error_msg.empty());
312}

#ifdef __LP64__
315TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
316  CommonInit();
317  std::string error_msg;
318  std::unique_ptr<MemMap> map(
319      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
320                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
321                           kPageSize,
322                           PROT_READ | PROT_WRITE,
323                           true,
324                           false,
325                           &error_msg));
326  ASSERT_EQ(nullptr, map.get());
327  ASSERT_FALSE(error_msg.empty());
328}
329
330TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
331  CommonInit();
332  std::string error_msg;
333  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
334                                                   reinterpret_cast<uint8_t*>(0xF0000000),
335                                                   0x20000000,
336                                                   PROT_READ | PROT_WRITE,
337                                                   true,
338                                                   false,
339                                                   &error_msg));
340  ASSERT_EQ(nullptr, map.get());
341  ASSERT_FALSE(error_msg.empty());
342}
#endif

345TEST_F(MemMapTest, MapAnonymousReuse) {
346  CommonInit();
347  std::string error_msg;
348  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
349                                                   nullptr,
350                                                   0x20000,
351                                                   PROT_READ | PROT_WRITE,
352                                                   false,
353                                                   false,
354                                                   &error_msg));
355  ASSERT_NE(nullptr, map.get());
356  ASSERT_TRUE(error_msg.empty());
357  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
358                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
359                                                    0x10000,
360                                                    PROT_READ | PROT_WRITE,
361                                                    false,
362                                                    true,
363                                                    &error_msg));
364  ASSERT_NE(nullptr, map2.get());
365  ASSERT_TRUE(error_msg.empty());
366}
367
368TEST_F(MemMapTest, CheckNoGaps) {
369  CommonInit();
370  std::string error_msg;
371  constexpr size_t kNumPages = 3;
372  // Map a 3-page mem map.
373  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
374                                                   nullptr,
375                                                   kPageSize * kNumPages,
376                                                   PROT_READ | PROT_WRITE,
377                                                   false,
378                                                   false,
379                                                   &error_msg));
380  ASSERT_TRUE(map.get() != nullptr) << error_msg;
381  ASSERT_TRUE(error_msg.empty());
382  // Record the base address.
383  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
384  // Unmap it.
385  map.reset();
386
387  // Map at the same address, but in page-sized separate mem maps,
388  // assuming the space at the address is still available.
389  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
390                                                    map_base,
391                                                    kPageSize,
392                                                    PROT_READ | PROT_WRITE,
393                                                    false,
394                                                    false,
395                                                    &error_msg));
396  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
397  ASSERT_TRUE(error_msg.empty());
398  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
399                                                    map_base + kPageSize,
400                                                    kPageSize,
401                                                    PROT_READ | PROT_WRITE,
402                                                    false,
403                                                    false,
404                                                    &error_msg));
405  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
406  ASSERT_TRUE(error_msg.empty());
407  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
408                                                    map_base + kPageSize * 2,
409                                                    kPageSize,
410                                                    PROT_READ | PROT_WRITE,
411                                                    false,
412                                                    false,
413                                                    &error_msg));
414  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
415  ASSERT_TRUE(error_msg.empty());
416
417  // One-map cases.
418  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
419  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
420  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
421
422  // Two or three-map cases.
423  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
424  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
425  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
426
427  // Unmap the middle one.
428  map1.reset();
429
430  // Should return false now that there's a gap in the middle.
431  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
432}
433
// Verifies MemMap::AlignBy: four maps carved out of one 14-page region are
// each shrunk to 2-page-aligned boundaries, and exactly the expected pages
// are trimmed from each end depending on the parity of the original base.
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                                  nullptr,
                                                  14 * page_size,
                                                  PROT_READ | PROT_WRITE,
                                                  false,
                                                  false,
                                                  &error_msg));
  uint8_t* base0 = m0->Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0->Size(), 14 * page_size);
  ASSERT_EQ(BaseBegin(m0.get()), base0);
  ASSERT_EQ(BaseSize(m0.get()), m0->Size());

  // Break it into several regions by using RemapAtEnd.
  // Resulting layout in pages: m0 = [0,3), m1 = [3,7), m2 = [7,10), m3 = [10,14).
  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base1 = m1->Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0->Size(), 3 * page_size);

  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                            "MemMapTest_AlignByTest_map2",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base2 = m2->Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1->Size(), 4 * page_size);

  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base3 = m3->Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2->Size(), 3 * page_size);
  ASSERT_EQ(m3->Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0->Size();
  uint8_t* end1 = base1 + m1->Size();
  uint8_t* end2 = base2 + m2->Size();
  uint8_t* end3 = base3 + m3->Size();

  // The four maps cover the original 14 pages with no gaps.
  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  // The maps are page-aligned but base0's 2-page alignment is arbitrary;
  // exactly one of the two parities below must hold for the four bases/ends.
  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size;
  m0->AlignBy(2 * page_size);
  m1->AlignBy(2 * page_size);
  m2->AlignBy(2 * page_size);
  m3->AlignBy(2 * page_size);

  // After AlignBy, both ends of every map must be 2-page-aligned.
  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));

  // Check exactly which single pages were trimmed, per alignment parity.
  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0->Begin(), base0);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
    EXPECT_EQ(m1->Begin(), base1 + page_size);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
    EXPECT_EQ(m2->Begin(), base2 + page_size);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
    EXPECT_EQ(m3->Begin(), base3);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
  } else {
    EXPECT_EQ(m0->Begin(), base0 + page_size);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
    EXPECT_EQ(m1->Begin(), base1);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
    EXPECT_EQ(m2->Begin(), base2);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
    EXPECT_EQ(m3->Begin(), base3 + page_size);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
  }
}

}  // namespace art