asan_allocator.h revision 321e1254d3bf8d67232aaff133961573fa0e3ec4
1//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// ASan-private header for asan_allocator.cc.
13//===----------------------------------------------------------------------===//
14
15#ifndef ASAN_ALLOCATOR_H
16#define ASAN_ALLOCATOR_H
17
18#include "asan_internal.h"
19#include "asan_interceptors.h"
20
21// We are in the process of transitioning from the old allocator (version 1)
22// to a new one (version 2). The change is quite intrusive so both allocators
23// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
25#define ASAN_ALLOCATOR_VERSION 1
26
27namespace __asan {
28
29static const uptr kNumberOfSizeClasses = 255;
30struct AsanChunk;
31
32class AsanChunkView {
33 public:
34  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
35  bool IsValid() { return chunk_ != 0; }
36  uptr Beg();       // first byte of user memory.
37  uptr End();       // last byte of user memory.
38  uptr UsedSize();  // size requested by the user.
39  uptr AllocTid();
40  uptr FreeTid();
41  void GetAllocStack(StackTrace *stack);
42  void GetFreeStack(StackTrace *stack);
43  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
44    if (addr >= Beg() && (addr + access_size) <= End()) {
45      *offset = addr - Beg();
46      return true;
47    }
48    return false;
49  }
50  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
51    if (addr < Beg()) {
52      *offset = Beg() - addr;
53      return true;
54    }
55    return false;
56  }
57  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
58    if (addr + access_size >= End()) {
59      if (addr <= End())
60        *offset = 0;
61      else
62        *offset = addr - End();
63      return true;
64    }
65    return false;
66  }
67
68 private:
69  AsanChunk *const chunk_;
70};
71
72AsanChunkView FindHeapChunkByAddress(uptr address);
73
74class AsanChunkFifoList {
75 public:
76  explicit AsanChunkFifoList(LinkerInitialized) { }
77  AsanChunkFifoList() { clear(); }
78  void Push(AsanChunk *n);
79  void PushList(AsanChunkFifoList *q);
80  AsanChunk *Pop();
81  uptr size() { return size_; }
82  void clear() {
83    first_ = last_ = 0;
84    size_ = 0;
85  }
86 private:
87  AsanChunk *first_;
88  AsanChunk *last_;
89  uptr size_;
90};
91
92struct AsanThreadLocalMallocStorage {
93  explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
94      : quarantine_(x) { }
95  AsanThreadLocalMallocStorage() {
96    CHECK(REAL(memset));
97    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
98  }
99
100  AsanChunkFifoList quarantine_;
101  AsanChunk *free_lists_[kNumberOfSizeClasses];
102  void CommitBack();
103};
104
105// Fake stack frame contains local variables of one function.
106// This struct should fit into a stack redzone (32 bytes).
107struct FakeFrame {
108  uptr magic;  // Modified by the instrumented code.
109  uptr descr;  // Modified by the instrumented code.
110  FakeFrame *next;
111  u64 real_stack     : 48;
112  u64 size_minus_one : 16;
113};
114
115struct FakeFrameFifo {
116 public:
117  void FifoPush(FakeFrame *node);
118  FakeFrame *FifoPop();
119 private:
120  FakeFrame *first_, *last_;
121};
122
123class FakeFrameLifo {
124 public:
125  void LifoPush(FakeFrame *node) {
126    node->next = top_;
127    top_ = node;
128  }
129  void LifoPop() {
130    CHECK(top_);
131    top_ = top_->next;
132  }
133  FakeFrame *top() { return top_; }
134 private:
135  FakeFrame *top_;
136};
137
138// For each thread we create a fake stack and place stack objects on this fake
139// stack instead of the real stack. The fake stack is not really a stack but
140// a fast malloc-like allocator so that when a function exits the fake stack
// is not popped but remains there for quite some time until gets used again.
142// So, we poison the objects on the fake stack when function returns.
143// It helps us find use-after-return bugs.
144// We can not rely on __asan_stack_free being called on every function exit,
145// so we maintain a lifo list of all current fake frames and update it on every
146// call to __asan_stack_malloc.
147class FakeStack {
148 public:
149  FakeStack();
150  explicit FakeStack(LinkerInitialized) {}
151  void Init(uptr stack_size);
152  void StopUsingFakeStack() { alive_ = false; }
153  void Cleanup();
154  uptr AllocateStack(uptr size, uptr real_stack);
155  static void OnFree(uptr ptr, uptr size, uptr real_stack);
156  // Return the bottom of the maped region.
157  uptr AddrIsInFakeStack(uptr addr);
158  bool StackSize() { return stack_size_; }
159
160 private:
161  static const uptr kMinStackFrameSizeLog = 9;  // Min frame is 512B.
162  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
163  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
164  static const uptr kNumberOfSizeClasses =
165      kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
166
167  bool AddrIsInSizeClass(uptr addr, uptr size_class);
168
169  // Each size class should be large enough to hold all frames.
170  uptr ClassMmapSize(uptr size_class);
171
172  uptr ClassSize(uptr size_class) {
173    return 1UL << (size_class + kMinStackFrameSizeLog);
174  }
175
176  void DeallocateFrame(FakeFrame *fake_frame);
177
178  uptr ComputeSizeClass(uptr alloc_size);
179  void AllocateOneSizeClass(uptr size_class);
180
181  uptr stack_size_;
182  bool   alive_;
183
184  uptr allocated_size_classes_[kNumberOfSizeClasses];
185  FakeFrameFifo size_classes_[kNumberOfSizeClasses];
186  FakeFrameLifo call_stack_;
187};
188
189void *asan_memalign(uptr alignment, uptr size, StackTrace *stack);
190void asan_free(void *ptr, StackTrace *stack);
191
192void *asan_malloc(uptr size, StackTrace *stack);
193void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
194void *asan_realloc(void *p, uptr size, StackTrace *stack);
195void *asan_valloc(uptr size, StackTrace *stack);
196void *asan_pvalloc(uptr size, StackTrace *stack);
197
198int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
199                          StackTrace *stack);
200uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);
201
202uptr asan_mz_size(const void *ptr);
203void asan_mz_force_lock();
204void asan_mz_force_unlock();
205
206// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
207#if defined(_WIN32) && !defined(__clang__)
208#include <intrin.h>
209#endif
210
211static inline uptr Log2(uptr x) {
212  CHECK(IsPowerOfTwo(x));
213#if !defined(_WIN32) || defined(__clang__)
214  return __builtin_ctzl(x);
215#elif defined(_WIN64)
216  unsigned long ret;  // NOLINT
217  _BitScanForward64(&ret, x);
218  return ret;
219#else
220  unsigned long ret;  // NOLINT
221  _BitScanForward(&ret, x);
222  return ret;
223#endif
224}
225
226static inline uptr RoundUpToPowerOfTwo(uptr size) {
227  CHECK(size);
228  if (IsPowerOfTwo(size)) return size;
229
230  unsigned long up;  // NOLINT
231#if !defined(_WIN32) || defined(__clang__)
232  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
233#elif defined(_WIN64)
234  _BitScanReverse64(&up, size);
235#else
236  _BitScanReverse(&up, size);
237#endif
238  CHECK(size < (1ULL << (up + 1)));
239  CHECK(size > (1ULL << up));
240  return 1UL << (up + 1);
241}
242
243
244}  // namespace __asan
245#endif  // ASAN_ALLOCATOR_H
246