/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <algorithm>
#include <memory>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "mem_map.h"
#include "stack.h"

namespace art {
namespace gc {
namespace accounting {

// Internal representation is StackReference<T>, so this only works with mirror::Object or its
// subclasses.
template <typename T>
class AtomicStack {
 public:
  class ObjectComparator {
   public:
    // These two comparators are for std::binary_search.
    bool operator()(const T* a, const StackReference<T>& b) const NO_THREAD_SAFETY_ANALYSIS {
      return a < b.AsMirrorPtr();
    }
    bool operator()(const StackReference<T>& a, const T* b) const NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b;
    }
    // This comparator is for std::sort.
    bool operator()(const StackReference<T>& a, const StackReference<T>& b) const
        NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b.AsMirrorPtr();
    }
  };
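
  // Sketch of why both mixed-type overloads above exist: std::binary_search probes the
  // comparator in both argument orders, comp(element, key) and comp(key, element), when the
  // key is a raw T* rather than a StackReference<T>. ContainsSorted() below relies on this:
  //
  //   bool found = std::binary_search(Begin(), End(), value, ObjectComparator());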

  // Capacity is the maximum number of elements we can store in the stack. growth_limit is the
  // soft limit used by the regular push operations; AtomicPushBackIgnoreGrowthLimit may fill the
  // stack all the way up to capacity.
  static AtomicStack* Create(const std::string& name, size_t growth_limit, size_t capacity) {
    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, growth_limit, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }
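
  // Usage sketch (illustrative; the limits below are hypothetical constants). The caller takes
  // ownership of the returned stack, typically via std::unique_ptr:
  //
  //   std::unique_ptr<AtomicStack<mirror::Object>> mark_stack(
  //       AtomicStack<mirror::Object>::Create("mark stack", kGrowthLimit, kCapacity));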

  ~AtomicStack() {}

  void Reset() {
    DCHECK(mem_map_.get() != nullptr);
    DCHECK(begin_ != nullptr);
    front_index_.StoreRelaxed(0);
    back_index_.StoreRelaxed(0);
    debug_is_sorted_ = true;
    mem_map_->MadviseDontNeedAndZero();
  }

  // Beware: Mixing atomic pushes and atomic pops will cause an ABA problem.
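  // Roughly: a compare-and-swap on back_index_ only checks that the index still holds the value
  // it originally read, not that no pops and pushes happened in between. If the index moves down
  // and back up to a previously observed value, a stale compare-and-swap can still succeed and
  // slots can be claimed or recycled out from under a concurrent operation.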

  // Returns false if we overflowed the stack.
  bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, capacity_);
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, growth_limit_);
  }

  // Atomically bump the back index by the given number of slots. Returns false if we overflowed
  // the stack.
  bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
                      StackReference<T>** end_address)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    int32_t new_index;
    do {
      index = back_index_.LoadRelaxed();
      new_index = index + num_slots;
      if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
    *start_address = begin_ + index;
    *end_address = begin_ + new_index;
    if (kIsDebugBuild) {
      // Sanity check that the memory is zero.
      for (int32_t i = index; i < new_index; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }
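
  // Usage sketch (illustrative; `stack`, `refs` and `count` are hypothetical): reserve a block
  // of slots in one shot, then fill the returned [start, end) range; no further synchronization
  // is needed for the reserved slots themselves.
  //
  //   StackReference<T>* start;
  //   StackReference<T>* end;
  //   if (stack->AtomicBumpBack(count, &start, &end)) {
  //     for (size_t i = 0; i < count; ++i) {
  //       start[i].Assign(refs[i]);
  //     }
  //   }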

  void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      for (size_t i = 0; i < capacity_; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
      }
    }
  }

  void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    const int32_t index = back_index_.LoadRelaxed();
    DCHECK_LT(static_cast<size_t>(index), growth_limit_);
    back_index_.StoreRelaxed(index + 1);
    begin_[index].Assign(value);
  }

  T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
    // Decrement the back index non-atomically.
    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
    return begin_[back_index_.LoadRelaxed()].AsMirrorPtr();
  }

  // Take an item from the front of the stack.
  T* PopFront() {
    int32_t index = front_index_.LoadRelaxed();
    DCHECK_LT(index, back_index_.LoadRelaxed());
    front_index_.StoreRelaxed(index + 1);
    return begin_[index].AsMirrorPtr();
  }

  // Pop a number of elements.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.FetchAndSubSequentiallyConsistent(n);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  bool IsFull() const {
    return Size() == growth_limit_;
  }

  size_t Size() const {
    DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
    return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
  }

  StackReference<T>* Begin() const {
    return begin_ + front_index_.LoadRelaxed();
  }
  StackReference<T>* End() const {
    return begin_ + back_index_.LoadRelaxed();
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Will clear the stack: existing contents are discarded and the backing memory is remapped.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    growth_limit_ = new_capacity;
    Init();
  }

  void Sort() {
    int32_t start_back_index = back_index_.LoadRelaxed();
    int32_t start_front_index = front_index_.LoadRelaxed();
    std::sort(Begin(), End(), ObjectComparator());
    CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
    CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

  bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value, ObjectComparator());
  }

  bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
    for (auto cur = Begin(), end = End(); cur != end; ++cur) {
      if (cur->AsMirrorPtr() == value) {
        return true;
      }
    }
    return false;
  }

 private:
  AtomicStack(const std::string& name, size_t growth_limit, size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(nullptr),
        growth_limit_(growth_limit),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    do {
      index = back_index_.LoadRelaxed();
      if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
    begin_[index].Assign(value);
    return true;
  }
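
  // Note: the compare-and-swap above only reserves the slot; the Assign() that fills it is a
  // plain store. A reader racing with pushes may therefore observe a reserved but still-null
  // slot, so consumers presumably need external synchronization (e.g. draining the stack only
  // once pushing has stopped).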

  // Allocates the backing memory. Sizes are in number of elements, not bytes.
  void Init() {
    std::string error_msg;
    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
                                        PROT_READ | PROT_WRITE, false, false, &error_msg));
    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
    uint8_t* addr = mem_map_->Begin();
    CHECK(addr != nullptr);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<StackReference<T>*>(addr);
    Reset();
  }

  // Name of the mark stack.
  std::string name_;
  // Memory mapping of the atomic stack.
  std::unique_ptr<MemMap> mem_map_;
  // Back index (index after the last element pushed).
  AtomicInteger back_index_;
  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;
  // Base of the atomic stack.
  StackReference<T>* begin_;
  // Current maximum which we can push back to, must be <= capacity_.
  size_t growth_limit_;
  // Maximum number of elements.
  size_t capacity_;
  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};

typedef AtomicStack<mirror::Object> ObjectStack;
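
// Usage sketch (illustrative; Visit() is a hypothetical callback): a typical drain loop pops
// from the back until the stack is empty.
//
//   while (!mark_stack->IsEmpty()) {
//     mirror::Object* obj = mark_stack->PopBack();
//     Visit(obj);
//   }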

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_