indirect_reference_table.h revision ea2e1bd713ca8295ba4fcd01e77a3ce532ea61e4
/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <string>

#include "base/logging.h"
#include "base/mutex.h"
#include "mem_map.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "read_barrier.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

/*
 * Maintain a table of indirect references. Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack. For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns. Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space. We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * Only SynchronizedGet is synchronized.
 */

/*
 * Indirect reference definition. This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global). Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used. (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table. This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get. It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1. A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;
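
// Illustrative sketch, not part of this header: given the layout described
// above (reference kind in the low 2 bits, table index in the next 16 bits,
// check/serial bits above that), an IndirectRef can be decoded with plain
// bit operations. The masks and shifts mirror GetIndirectRefKind(),
// IndirectReferenceTable::ExtractIndex() and ToIndirectRef() below; "iref"
// is a hypothetical IndirectRef value.
//
//   uintptr_t bits   = reinterpret_cast<uintptr_t>(iref);
//   uint32_t  kind   = bits & 0x03;           // IndirectRefKind
//   uint32_t  index  = (bits >> 2) & 0xffff;  // slot in the table
//   uint32_t  serial = bits >> 20;            // stale-reference check bits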

// Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress().
static mirror::Object* const kInvalidIndirectRefObject = reinterpret_cast<mirror::Object*>(0xdead4321);
static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast<mirror::Object*>(0xdead1234);

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal = 1,  // <<local reference>>
  kGlobal = 2,  // <<global reference>>
  kWeakGlobal = 3  // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);

/*
 * Determine what kind of indirect reference this is.
 */
static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}

/*
 * Extended debugging structure. We keep a parallel array of these, one
 * per slot in the table.
 */
static const size_t kIRTPrevCount = 4;
struct IndirectRefSlot {
  uint32_t serial;
  const mirror::Object* previous[kIRTPrevCount];
};

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry). For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move. If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes". We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed. Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment". The top is managed internally, and
 * the bottom is passed in as a function argument. When we call a native
 * method or push a local frame, the current top index gets pushed on, and
 * serves as the new bottom. When we pop a frame off, the value from the
 * stack becomes the new top index, and the value stored in the previous
 * frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack. Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot. Instead of getting a table and doing a lookup,
 * the lookup can be done instantly. Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance. A large circular buffer might
 * reduce the amortized cost of adding global references.
 *
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
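
// Illustrative sketch, not part of this header: the "cookie" passed to
// Add()/Remove() is simply an IRTSegmentState::all value saved before a
// segment is pushed. Because topIndex and numHoles are packed into one
// 32-bit word, popping a segment is a single store. "irt" and "cookie" are
// hypothetical variables; the accessors are declared further below.
//
//   uint32_t cookie = irt.GetSegmentState();   // push: remember current state
//   IRTSegmentState saved;
//   saved.all = cookie;
//   uint16_t bottom = saved.parts.topIndex;    // bottom of the new segment
//   uint16_t holes  = saved.parts.numHoles;    // holes below that bottom
//   // ... native code adds/removes local refs ...
//   irt.SetSegmentState(cookie);               // pop: restore in O(1)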

class IrtIterator {
 public:
  explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
      : table_(table), i_(i), capacity_(capacity) {
    SkipNullsAndTombstones();
  }

  IrtIterator& operator++() {
    ++i_;
    SkipNullsAndTombstones();
    return *this;
  }

  mirror::Object** operator*() {
    // This does not have a read barrier as this is used to visit roots.
    return &table_[i_];
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  void SkipNullsAndTombstones() {
    // We skip NULLs and tombstones. Clients don't want to see implementation details.
    while (i_ < capacity_ && (table_[i_] == NULL || table_[i_] == kClearedJniWeakGlobal)) {
      ++i_;
    }
  }

  mirror::Object** const table_;
  size_t i_;
  size_t capacity_;
};

bool inline operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

bool inline operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}
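
// Illustrative sketch, not part of this header: the local-reference pattern
// the declarations below are designed for. "locals" and "obj" are
// hypothetical; the calls correspond to the members of IndirectReferenceTable.
//
//   uint32_t cookie = locals.GetSegmentState();  // start of the segment
//   IndirectRef ref = locals.Add(cookie, obj);   // NULL if the table is full
//   mirror::Object* o = locals.Get(ref);         // O(1) lookup
//   locals.Remove(cookie, ref);                  // only entries in this segment
//   locals.SetSegmentState(cookie);              // strip the whole segment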

class IndirectReferenceTable {
 public:
  IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind);

  ~IndirectReferenceTable();

  /*
   * Add a new entry. "obj" must be a valid non-NULL object reference.
   *
   * Returns NULL if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                  IndirectRef iref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything. This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);

  void AssertEmpty();

  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table. This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  // Note IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset() {
    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
  }

 private:
  /*
   * Extract the table index from an indirect reference.
   */
  static uint32_t ExtractIndex(IndirectRef iref) {
    uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
    return (uref >> 2) & 0xffff;
  }

  /*
   * The object pointer itself is subject to relocation in some GC
   * implementations, so we shouldn't really be using it here.
   */
  IndirectRef ToIndirectRef(uint32_t tableIndex) const {
    DCHECK_LT(tableIndex, 65536U);
    uint32_t serialChunk = slot_data_[tableIndex].serial;
    uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
    return reinterpret_cast<IndirectRef>(uref);
  }

  /*
   * Update extended debug info when an entry is added.
   *
   * We advance the serial number, invalidating any outstanding references to
   * this slot.
   */
  void UpdateSlotAdd(const mirror::Object* obj, int slot) {
    if (slot_data_ != NULL) {
      IndirectRefSlot* pSlot = &slot_data_[slot];
      pSlot->serial++;
      pSlot->previous[pSlot->serial % kIRTPrevCount] = obj;
    }
  }

  // Abort if check_jni is not enabled.
  static void AbortIfNoCheckJNI();

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const;
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by jni down calls */
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Mem map where we store the extended debugging info.
  std::unique_ptr<MemMap> slot_mem_map_;
  // bottom of the stack. Do not directly access the object references
  // in this as they are roots. Use Get() that has a read barrier.
  mirror::Object** table_;
  /* bit mask, ORed into all irefs */
  IndirectRefKind kind_;
  /* extended debugging info */
  IndirectRefSlot* slot_data_;
  /* #of entries we have space for */
  size_t alloc_entries_;
  /* max #of entries allowed */
  size_t max_entries_;
};
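
// Illustrative sketch, not part of this header: begin()/end() expose the live
// entries for root visiting; IrtIterator skips NULL slots and cleared weak
// globals and yields mirror::Object** without a read barrier. "irt" is a
// hypothetical IndirectReferenceTable instance.
//
//   for (auto it = irt.begin(), end = irt.end(); it != end; ++it) {
//     mirror::Object** root = *it;
//     // ... visit *root as a GC root ...
//   }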

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_