//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

// A key for depot lookups: a raw stack trace (array of PCs) and its length.
struct StackDepotDesc {
  const uptr *stack;
  uptr size;
  u32 hash() const {
    // MurmurHash2 over the frame PCs. Only the low 32 bits of each PC feed
    // the hash; exact equality is re-checked in StackDepotNode::eq().
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (size * sizeof(uptr));
    for (uptr i = 0; i < size; i++) {
      u32 k = stack[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  bool is_valid() const { return size > 0 && stack; }
};
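
// Illustrative property of hash()/eq() (a sketch, not code from this file):
// descriptors over equal PC arrays produce equal hashes, so the depot can use
// hash() to pick a bucket and eq() to confirm an exact match:
//   uptr pcs[] = {0x401000, 0x401234};
//   StackDepotDesc a = {pcs, 2};
//   StackDepotDesc b = {pcs, 2};
//   CHECK_EQ(a.hash(), b.hash());
//   // A colliding but unequal trace is rejected by StackDepotNode::eq().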

// One interned stack trace. The frame array is stored inline after the
// header; stack[1] is the old-style flexible-array idiom, sized by
// storage_size() at allocation time.
struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
  uptr size;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // The table index is taken from the low kTabSizeLog bits of the hash, so
  // those bits are equal for all items in one bucket and carry no extra
  // information. We reuse them to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;

  typedef StackDepotDesc args_type;
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size) return false;
    for (uptr i = 0; i < size; i++) {
      if (stack[i] != args.stack[i]) return false;
    }
    return true;
  }
  static uptr storage_size(const args_type &args) {
    // One frame is already counted in sizeof(StackDepotNode) via stack[1].
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    internal_memcpy(stack, args.stack, size * sizeof(uptr));
  }
  args_type load() const {
    args_type ret = {&stack[0], size};
    return ret;
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};
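
// Layout of hash_and_use_count, restated from the masks above (with
// kUseCountBits == 20):
//
//   bit 31 .......... bit 20 | bit 19 .......... bit 0
//   top 12 hash bits         | use_count
//
// For example (illustrative values), hash 0xDEADBEEF is stored as 0xDEA00000,
// and each inc_use_count_unsafe() below adds 1 to the low bits.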

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
void StackDepotHandle::inc_use_count_unsafe() {
  // "Unsafe" because an overflow of the counter would corrupt the hash bits
  // stored in the same word; the CHECK below aborts before that can happen.
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
uptr StackDepotHandle::size() { return node_->size; }
uptr *StackDepotHandle::stack() { return &node_->stack[0]; }
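
// Illustrative client-side counting (a sketch; 'pcs' and 'depth' are assumed
// to come from the caller's unwinder):
//   StackDepotHandle h = StackDepotPut_WithHandle(pcs, depth);
//   if (h.valid()) {
//     h.inc_use_count_unsafe();
//     int hits = h.use_count();  // reads the low kUseCountBits
//   }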

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(const uptr *stack, uptr size) {
  StackDepotDesc desc = {stack, size};
  StackDepotHandle h = theDepot.Put(desc);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) {
  StackDepotDesc desc = {stack, size};
  return theDepot.Put(desc);
}

const uptr *StackDepotGet(u32 id, uptr *size) {
  StackDepotDesc desc = theDepot.Get(id);
  *size = desc.size;
  return desc.stack;
}
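
// Illustrative round trip (a sketch; CollectFrames is a hypothetical unwinder
// call, not part of this interface):
//   uptr pcs[64];
//   uptr depth = CollectFrames(pcs, 64);
//   u32 id = StackDepotPut(pcs, depth);           // 0 if the trace is invalid
//   uptr size = 0;
//   const uptr *trace = StackDepotGet(id, &size);
//   // 'trace' points directly into the depot node; nodes are never freed,
//   // so the pointer stays valid for the lifetime of the process.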
130
131bool StackDepotReverseMap::IdDescPair::IdComparator(
132    const StackDepotReverseMap::IdDescPair &a,
133    const StackDepotReverseMap::IdDescPair &b) {
134  return a.id < b.id;
135}
136
137StackDepotReverseMap::StackDepotReverseMap()
138    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
139  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
140    atomic_uintptr_t *p = &theDepot.tab[idx];
141    uptr v = atomic_load(p, memory_order_consume);
142    StackDepotNode *s = (StackDepotNode*)(v & ~1);
143    for (; s; s = s->link) {
144      IdDescPair pair = {s->id, s};
145      map_.push_back(pair);
146    }
147  }
148  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
149}
150
151const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
152  if (!map_.size()) return 0;
153  IdDescPair pair = {id, 0};
154  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
155                                  IdDescPair::IdComparator);
156  if (idx > map_.size()) {
157    *size = 0;
158    return 0;
159  }
160  StackDepotNode *desc = map_[idx].desc;
161  *size = desc->size;
162  return desc->stack;
163}
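
// Illustrative use of the reverse map (a sketch; building it is O(n log n)
// and each Get() is O(log n), so it pays off when many ids are resolved):
//   StackDepotReverseMap rev_map;  // snapshots the depot at this point
//   uptr size = 0;
//   const uptr *trace = rev_map.Get(id, &size);
// Stacks interned after construction are not visible to an existing map.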

}  // namespace __sanitizer