/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <algorithm>
#include <memory>
#include <string>

#include "atomic.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "mem_map.h"
#include "stack.h"

namespace art {
namespace gc {
namespace accounting {

// Internal representation is StackReference<T>, so this only works with mirror::Object or its
// subclasses.
template <typename T>
class AtomicStack {
 public:
  class ObjectComparator {
   public:
    // These two comparators are for std::binary_search.
    bool operator()(const T* a, const StackReference<T>& b) const NO_THREAD_SAFETY_ANALYSIS {
      return a < b.AsMirrorPtr();
    }
    bool operator()(const StackReference<T>& a, const T* b) const NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b;
    }
    // This comparator is for std::sort.
    bool operator()(const StackReference<T>& a, const StackReference<T>& b) const
        NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b.AsMirrorPtr();
    }
  };

  // Capacity is how many elements we can store in the stack.
  static AtomicStack* Create(const std::string& name, size_t growth_limit, size_t capacity) {
    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, growth_limit, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }
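
  // Illustrative usage sketch of the single-threaded push/pop API; `growth_limit`, `capacity`
  // and `obj` are hypothetical caller-side names, and ObjectStack is the mirror::Object
  // instantiation declared at the bottom of this file:
  //
  //   std::unique_ptr<ObjectStack> stack(
  //       ObjectStack::Create("example mark stack", growth_limit, capacity));
  //   stack->PushBack(obj);                       // non-atomic push
  //   mirror::Object* popped = stack->PopBack();  // non-atomic pop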

  ~AtomicStack() {}

  void Reset() {
    DCHECK(mem_map_.get() != nullptr);
    DCHECK(begin_ != nullptr);
    front_index_.StoreRelaxed(0);
    back_index_.StoreRelaxed(0);
    debug_is_sorted_ = true;
    mem_map_->MadviseDontNeedAndZero();
  }

  // Beware: Mixing atomic pushes and atomic pops will cause the ABA problem.
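  // (Rationale: the push CAS only checks the numeric value of back_index_, so an index that is
  // popped and then re-pushed back to the same value between the load and the CAS would go
  // undetected and the slot could be overwritten.)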

  // Returns false if we overflowed the stack.
  bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, capacity_);
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, growth_limit_);
  }

  // Atomically bump the back index by the given number of
  // slots. Returns false if we overflowed the stack.
  bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
                      StackReference<T>** end_address)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    int32_t new_index;
    do {
      index = back_index_.LoadRelaxed();
      new_index = index + num_slots;
      if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
    *start_address = begin_ + index;
    *end_address = begin_ + new_index;
    if (kIsDebugBuild) {
      // Sanity check that the memory is zero.
      for (int32_t i = index; i < new_index; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }
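
  // Illustrative sketch of bulk insertion with AtomicBumpBack: reserve a contiguous range of
  // slots, then fill it without further synchronization (`stack` and `refs` are hypothetical
  // caller-side names):
  //
  //   StackReference<mirror::Object>* start;
  //   StackReference<mirror::Object>* end;
  //   if (stack->AtomicBumpBack(refs.size(), &start, &end)) {
  //     for (mirror::Object* ref : refs) {
  //       (start++)->Assign(ref);  // fill the reserved [start, end) slots in order
  //     }
  //   }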

  void AssertAllZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      for (size_t i = 0; i < capacity_; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
      }
    }
  }

  void PushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    const int32_t index = back_index_.LoadRelaxed();
    DCHECK_LT(static_cast<size_t>(index), growth_limit_);
    back_index_.StoreRelaxed(index + 1);
    begin_[index].Assign(value);
  }

  T* PopBack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
    // Decrement the back index non-atomically.
    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
    return begin_[back_index_.LoadRelaxed()].AsMirrorPtr();
  }

  // Take an item from the front of the stack.
  T PopFront() {
    int32_t index = front_index_.LoadRelaxed();
    DCHECK_LT(index, back_index_.LoadRelaxed());
    front_index_.StoreRelaxed(index + 1);
    return begin_[index];
  }

  // Pop a number of elements.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.FetchAndSubSequentiallyConsistent(n);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  size_t Size() const {
    DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
    return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
  }

  StackReference<T>* Begin() const {
    return begin_ + front_index_.LoadRelaxed();
  }
  StackReference<T>* End() const {
    return begin_ + back_index_.LoadRelaxed();
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Will clear the stack.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    growth_limit_ = new_capacity;
    Init();
  }

  void Sort() {
    int32_t start_back_index = back_index_.LoadRelaxed();
    int32_t start_front_index = front_index_.LoadRelaxed();
    std::sort(Begin(), End(), ObjectComparator());
    CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
    CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

  bool ContainsSorted(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value, ObjectComparator());
  }
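
  // Illustrative sketch: Sort() followed by ContainsSorted() supports binary-search membership
  // tests via ObjectComparator (`stack` and `obj` are hypothetical caller-side names):
  //
  //   stack->Sort();
  //   if (stack->ContainsSorted(obj)) {
  //     // obj was pushed onto the stack.
  //   }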

  bool Contains(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    for (auto cur = Begin(), end = End(); cur != end; ++cur) {
      if (cur->AsMirrorPtr() == value) {
        return true;
      }
    }
    return false;
  }

 private:
  AtomicStack(const std::string& name, size_t growth_limit, size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(nullptr),
        growth_limit_(growth_limit),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    do {
      index = back_index_.LoadRelaxed();
      if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
    begin_[index].Assign(value);
    return true;
  }

  // Allocates and maps the backing memory; capacity_ is a number of elements, not bytes.
  void Init() {
    std::string error_msg;
    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
                                        PROT_READ | PROT_WRITE, false, false, &error_msg));
    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
    uint8_t* addr = mem_map_->Begin();
    CHECK(addr != nullptr);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<StackReference<T>*>(addr);
    Reset();
  }

  // Name of the mark stack.
  std::string name_;
  // Memory mapping of the atomic stack.
  std::unique_ptr<MemMap> mem_map_;
  // Back index (index after the last element pushed).
  AtomicInteger back_index_;
  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;
  // Base of the atomic stack.
  StackReference<T>* begin_;
  // Current maximum which we can push back to, must be <= capacity_.
  size_t growth_limit_;
  // Maximum number of elements.
  size_t capacity_;
  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};

typedef AtomicStack<mirror::Object> ObjectStack;

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_