// mem_map.h -- revision bddaea2b88b0a19d9cc7a4dea772af8e829323b3
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#ifndef ART_RUNTIME_MEM_MAP_H_
18#define ART_RUNTIME_MEM_MAP_H_
19
20#include "base/mutex.h"
21
22#include <string>
23#include <map>
24
25#include <stddef.h>
26#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
27#include <sys/types.h>
28
29#include "globals.h"
30
31namespace art {
32
33#if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
34#define USE_ART_LOW_4G_ALLOCATOR 1
35#else
36#define USE_ART_LOW_4G_ALLOCATOR 0
37#endif
38
39#ifdef __linux__
40static constexpr bool kMadviseZeroes = true;
41#else
42static constexpr bool kMadviseZeroes = false;
43#endif
44
45// Used to keep track of mmap segments.
46//
47// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
48// for free pages. For security, the start of this scan should be randomized. This requires a
49// dynamic initializer.
50// For this to work, it is paramount that there are no other static initializers that access MemMap.
51// Otherwise, calls might see uninitialized values.
52class MemMap {
53 public:
54  // Request an anonymous region of length 'byte_count' and a requested base address.
55  // Use NULL as the requested base address if you don't care.
56  //
57  // The word "anonymous" in this context means "not backed by a file". The supplied
58  // 'ashmem_name' will be used -- on systems that support it -- to give the mapping
59  // a name.
60  //
61  // On success, returns returns a MemMap instance.  On failure, returns a NULL;
62  static MemMap* MapAnonymous(const char* ashmem_name, byte* addr, size_t byte_count, int prot,
63                              bool low_4gb, std::string* error_msg);
64
65  // Map part of a file, taking care of non-page aligned offsets.  The
66  // "start" offset is absolute, not relative.
67  //
68  // On success, returns returns a MemMap instance.  On failure, returns a NULL;
69  static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
70                         const char* filename, std::string* error_msg) {
71    return MapFileAtAddress(NULL, byte_count, prot, flags, fd, start, false, filename, error_msg);
72  }
73
74  // Map part of a file, taking care of non-page aligned offsets.  The
75  // "start" offset is absolute, not relative. This version allows
76  // requesting a specific address for the base of the
77  // mapping. "reuse" allows us to create a view into an existing
78  // mapping where we do not take ownership of the memory.
79  //
80  // On success, returns returns a MemMap instance.  On failure, returns a
81  // nullptr;
82  static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
83                                  off_t start, bool reuse, const char* filename,
84                                  std::string* error_msg);
85
86  // Releases the memory mapping
87  ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
88
89  const std::string& GetName() const {
90    return name_;
91  }
92
93  bool Protect(int prot);
94
95  void MadviseDontNeedAndZero();
96
97  int GetProtect() const {
98    return prot_;
99  }
100
101  byte* Begin() const {
102    return begin_;
103  }
104
105  size_t Size() const {
106    return size_;
107  }
108
109  byte* End() const {
110    return Begin() + Size();
111  }
112
113  void* BaseBegin() const {
114    return base_begin_;
115  }
116
117  size_t BaseSize() const {
118    return base_size_;
119  }
120
121  void* BaseEnd() const {
122    return reinterpret_cast<byte*>(BaseBegin()) + BaseSize();
123  }
124
125  bool HasAddress(const void* addr) const {
126    return Begin() <= addr && addr < End();
127  }
128
129  // Unmap the pages at end and remap them to create another memory map.
130  MemMap* RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
131                     std::string* error_msg);
132
133  static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
134      LOCKS_EXCLUDED(Locks::mem_maps_lock_);
135  static void DumpMaps(std::ostream& os)
136      LOCKS_EXCLUDED(Locks::mem_maps_lock_);
137
138 private:
139  MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
140         int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
141
142  static void DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
143      LOCKS_EXCLUDED(Locks::mem_maps_lock_);
144  static void DumpMapsLocked(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
145      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
146  static bool HasMemMap(MemMap* map)
147      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
148  static MemMap* GetLargestMemMapAt(void* address)
149      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
150
151  const std::string name_;
152  byte* const begin_;  // Start of data.
153  size_t size_;  // Length of data.
154
155  void* const base_begin_;  // Page-aligned base address.
156  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
157  int prot_;  // Protection of the map.
158
159  // When reuse_ is true, this is just a view of an existing mapping
160  // and we do not take ownership and are not responsible for
161  // unmapping.
162  const bool reuse_;
163
164#if USE_ART_LOW_4G_ALLOCATOR
165  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
166#endif
167
168  // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
169  static std::multimap<void*, MemMap*> maps_ GUARDED_BY(Locks::mem_maps_lock_);
170
171  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
172};
173std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
174
175}  // namespace art
176
177#endif  // ART_RUNTIME_MEM_MAP_H_
178