/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <algorithm>
#include <memory>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "mem_map.h"
#include "utils.h"

namespace art {

namespace mirror {
class Object;  // Forward declaration for the ObjectStack typedef below.
}  // namespace mirror

namespace gc {
namespace accounting {

template <typename T>
class AtomicStack {
 public:
  // Capacity is how many elements we can store in the stack.
  static AtomicStack* Create(const std::string& name, size_t growth_limit, size_t capacity) {
    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, growth_limit, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }

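  // The backing MemMap is released automatically via the mem_map_ unique_ptr.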
  ~AtomicStack() {}

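  // Empties the stack and returns the backing pages to the kernel (madvise), keeping the
  // mapping itself so the stack can be reused without remapping.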
  void Reset() {
    DCHECK(mem_map_.get() != nullptr);
    DCHECK(begin_ != nullptr);
    front_index_.StoreRelaxed(0);
    back_index_.StoreRelaxed(0);
    debug_is_sorted_ = true;
    mem_map_->MadviseDontNeedAndZero();
  }

  // Beware: Mixing atomic pushes and atomic pops will cause an ABA problem.
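  // (A pop can return back_index_ to a previously observed value, so a concurrent pusher's
  //  compare-and-swap cannot detect the intervening pop.)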

  // Returns false if we overflowed the stack.
  bool AtomicPushBackIgnoreGrowthLimit(const T& value) {
    return AtomicPushBackInternal(value, capacity_);
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBack(const T& value) {
    return AtomicPushBackInternal(value, growth_limit_);
  }

  // Atomically bump the back index by the given number of
  // slots. Returns false if we overflowed the stack.
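  //
  // Illustrative use only (num_slots is whatever block size the caller wants): reserve a
  // block of slots up front, then fill it without further synchronization:
  //   T* start;
  //   T* end;
  //   if (stack->AtomicBumpBack(num_slots, &start, &end)) {
  //     for (T* it = start; it != end; ++it) {
  //       *it = ...;  // Each slot in the reserved range [start, end) now belongs to this thread.
  //     }
  //   }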
  bool AtomicBumpBack(size_t num_slots, T** start_address, T** end_address) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    int32_t new_index;
    do {
      index = back_index_.LoadRelaxed();
      new_index = index + num_slots;
      if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
    *start_address = &begin_[index];
    *end_address = &begin_[new_index];
    if (kIsDebugBuild) {
      // Sanity check that the memory is zero.
      for (int32_t i = index; i < new_index; ++i) {
        DCHECK_EQ(begin_[i], static_cast<T>(0))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }

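  // Debug-only check that every slot in the backing store is zero.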
  void AssertAllZero() {
    if (kIsDebugBuild) {
      for (size_t i = 0; i < capacity_; ++i) {
        DCHECK_EQ(begin_[i], static_cast<T>(0)) << "i=" << i;
      }
    }
  }

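  // Pushes a value with no synchronization; only safe when no other thread is mutating the
  // stack concurrently (e.g. during a pause).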
  void PushBack(const T& value) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index = back_index_.LoadRelaxed();
    DCHECK_LT(static_cast<size_t>(index), growth_limit_);
    back_index_.StoreRelaxed(index + 1);
    begin_[index] = value;
  }

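  // Removes and returns the most recently pushed element; not thread-safe.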
  T PopBack() {
    DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
    // Decrement the back index non-atomically.
    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
    return begin_[back_index_.LoadRelaxed()];
  }

  // Take an item from the front of the stack.
  T PopFront() {
    int32_t index = front_index_.LoadRelaxed();
    DCHECK_LT(index, back_index_.LoadRelaxed());
    front_index_.StoreRelaxed(index + 1);
    return begin_[index];
  }

  // Pop the last n pushed elements off the back of the stack.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.FetchAndSubSequentiallyConsistent(n);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  size_t Size() const {
    DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
    return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
  }

  T* Begin() const {
    return const_cast<T*>(begin_ + front_index_.LoadRelaxed());
  }

  T* End() const {
    return const_cast<T*>(begin_ + back_index_.LoadRelaxed());
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Resizes the backing storage; note that this clears the current contents of the stack.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    growth_limit_ = new_capacity;
    Init();
  }

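  // Sorts the live range [Begin(), End()); the CHECKs below verify that no concurrent push or
  // pop moved the indices during the sort.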
  void Sort() {
    int32_t start_back_index = back_index_.LoadRelaxed();
    int32_t start_front_index = front_index_.LoadRelaxed();
    std::sort(Begin(), End());
    CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
    CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

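  // Binary search over the live range; only valid after Sort() (tracked with debug_is_sorted_
  // in debug builds).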
  bool ContainsSorted(const T& value) const {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value);
  }

  bool Contains(const T& value) const {
    return std::find(Begin(), End(), value) != End();
  }

 private:
  AtomicStack(const std::string& name, size_t growth_limit, size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(nullptr),
        growth_limit_(growth_limit),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBackInternal(const T& value, size_t limit) ALWAYS_INLINE {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    do {
      index = back_index_.LoadRelaxed();
      if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
    begin_[index] = value;
    return true;
  }

  // Maps the backing storage (capacity_ elements) and resets the stack indices.
  void Init() {
    std::string error_msg;
    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(T),
                                        PROT_READ | PROT_WRITE, false, &error_msg));
    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
    byte* addr = mem_map_->Begin();
    CHECK(addr != nullptr);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<T*>(addr);
    Reset();
  }


  // Name of the mark stack.
  std::string name_;
  // Memory mapping of the atomic stack.
  std::unique_ptr<MemMap> mem_map_;
  // Back index (index after the last element pushed).
  AtomicInteger back_index_;
  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;
  // Base of the atomic stack.
  T* begin_;
  // Current maximum which we can push back to, must be <= capacity_.
  size_t growth_limit_;
  // Maximum number of elements.
  size_t capacity_;
  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};

typedef AtomicStack<mirror::Object*> ObjectStack;
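
// Illustrative usage of ObjectStack (names other than the API above are hypothetical):
//   std::unique_ptr<ObjectStack> stack(
//       ObjectStack::Create("mark stack", growth_limit, capacity));
//   if (!stack->AtomicPushBack(obj)) {
//     // Overflow: the caller must handle this, e.g. by resizing or taking a slower path.
//   }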

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_