mem_map.h revision 625a64aad13905d8a2454bf3cc0e874487b110d5
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MEM_MAP_H_
#define ART_RUNTIME_MEM_MAP_H_

#include "base/mutex.h"

#include <string>
#include <map>

#include <stddef.h>
#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
#include <sys/types.h>

#include "base/allocator.h"
#include "globals.h"

namespace art {

#if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#else
static constexpr bool kMadviseZeroes = false;
#endif

// Used to keep track of mmap segments.
//
// On 64-bit systems that do not support MAP_32BIT, the implementation of MemMap will do a linear
// scan for free pages. For security, the start of this scan should be randomized; this requires
// a dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access
// MemMap. Otherwise, those calls might see uninitialized values.
class MemMap {
 public:
  // Request an anonymous region of length 'byte_count', optionally at a requested base address.
  // Use null as the requested base address if you don't care.
  // "reuse" allows re-mapping an address range from an existing mapping.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapAnonymous(const char* name,
                              uint8_t* addr,
                              size_t byte_count,
                              int prot,
                              bool low_4gb,
                              bool reuse,
                              std::string* error_msg,
                              bool use_ashmem = true);
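  //
  // Example usage of MapAnonymous (a sketch, not part of the original header; the name, size,
  // and error handling below are illustrative assumptions):
  //
  //   std::string error_msg;
  //   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("example-region",
  //                                                    /* addr */ nullptr,
  //                                                    /* byte_count */ 64 * 1024,
  //                                                    PROT_READ | PROT_WRITE,
  //                                                    /* low_4gb */ false,
  //                                                    /* reuse */ false,
  //                                                    &error_msg));
  //   if (map == nullptr) {
  //     LOG(ERROR) << "Anonymous mapping failed: " << error_msg;
  //   }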

  // Create a placeholder for a region allocated by a direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but still want to keep track of the region in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page-aligned offsets.  The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapFile(size_t byte_count,
                         int prot,
                         int flags,
                         int fd,
                         off_t start,
                         bool low_4gb,
                         const char* filename,
                         std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb*/low_4gb,
                            /*reuse*/false,
                            filename,
                            error_msg);
  }
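  //
  // Example usage of MapFile (a sketch, not part of the original header; the path, descriptor
  // handling, and 'file_size' are illustrative assumptions):
  //
  //   int fd = open("/path/to/some.file", O_RDONLY);
  //   std::string error_msg;
  //   std::unique_ptr<MemMap> file_map(MemMap::MapFile(/* byte_count */ file_size,
  //                                                    PROT_READ,
  //                                                    MAP_PRIVATE,
  //                                                    fd,
  //                                                    /* start */ 0,
  //                                                    /* low_4gb */ false,
  //                                                    "some.file",
  //                                                    &error_msg));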

  // Map part of a file, taking care of non-page-aligned offsets.  The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
  // the memory. If error_msg is null, we do not print /proc/maps to the log if MapFileAtAddress
  // fails. This helps improve performance of the failure case, since reading and printing
  // /proc/maps takes several milliseconds in the worst case.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapFileAtAddress(uint8_t* addr,
                                  size_t byte_count,
                                  int prot,
                                  int flags,
                                  int fd,
                                  off_t start,
                                  bool low_4gb,
                                  bool reuse,
                                  const char* filename,
                                  std::string* error_msg);
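  //
  // Example of the "reuse" pattern (a sketch, not part of the original header; 'reservation',
  // 'fd', the flags, and the name are illustrative assumptions and presume an existing mapping
  // that already covers the target range):
  //
  //   std::unique_ptr<MemMap> view(MemMap::MapFileAtAddress(reservation->Begin(),
  //                                                         /* byte_count */ reservation->Size(),
  //                                                         PROT_READ,
  //                                                         MAP_PRIVATE | MAP_FIXED,
  //                                                         fd,
  //                                                         /* start */ 0,
  //                                                         /* low_4gb */ false,
  //                                                         /* reuse */ true,
  //                                                         "mapped-view",
  //                                                         &error_msg));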

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!Locks::mem_maps_lock_);

  const std::string& GetName() const {
    return name_;
  }

  // Flushes the mapping back to its backing file, if any. Returns true on success.
  bool Sync();

  // Changes the protection of the mapping to 'prot'. Returns true on success.
  bool Protect(int prot);

  // Releases the pages backing the mapping, leaving them zero-filled.
  void MadviseDontNeedAndZero();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at the end and remap them to create another memory map.
  MemMap* RemapAtEnd(uint8_t* new_end,
                     const char* tail_name,
                     int tail_prot,
                     std::string* error_msg,
                     bool use_ashmem = true);
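  //
  // Example usage of RemapAtEnd (a sketch, not part of the original header; it assumes 'map'
  // already spans at least two pages and that kPageSize comes from globals.h):
  //
  //   std::string error_msg;
  //   std::unique_ptr<MemMap> tail(map->RemapAtEnd(map->Begin() + kPageSize,
  //                                                "example-tail",
  //                                                PROT_READ | PROT_WRITE,
  //                                                &error_msg));
  //   // On success, 'map' now ends at map->Begin() + kPageSize and 'tail' owns the remainder.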

  static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
      REQUIRES(!Locks::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!Locks::mem_maps_lock_);

  typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;

  static void Init() REQUIRES(!Locks::mem_maps_lock_);
  static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
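  //
  // Sketch of the expected lifecycle (an assumption based on the class comment above, not part
  // of the original header): Init() is called once before any MemMap is created, and Shutdown()
  // once when no more MemMaps will be used.
  //
  //   MemMap::Init();       // once, early during runtime start-up
  //   ...                   // create and use MemMap instances
  //   MemMap::Shutdown();   // once, during runtime teardown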

 private:
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(Locks::mem_maps_lock_);
  static bool HasMemMap(MemMap* map)
      REQUIRES(Locks::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(Locks::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!Locks::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb);

  const std::string name_;
  uint8_t* const begin_;  // Start of data.
  size_t size_;  // Length of data.

  void* const base_begin_;  // Page-aligned base address.
  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (e.g. by the Zygote).
  int prot_;  // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  const bool reuse_;

  const size_t redzone_size_;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
#endif

  // All the non-empty MemMaps. Use a multimap, as we do a reserve-and-divide (e.g. ElfMap::Load()).
  static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps);

}  // namespace art

#endif  // ART_RUNTIME_MEM_MAP_H_