indirect_reference_table.h revision ffddfdf6fec0b9d98a692e27242eecb15af5ead2

/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <string>

#include "base/logging.h"
#include "base/mutex.h"
#include "mem_map.h"
#include "object_callbacks.h"
#include "offsets.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

/*
 * Maintain a table of indirect references.  Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack.  For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns.  Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space.  We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * Only SynchronizedGet is synchronized.
 */

/*
 * Indirect reference definition.  This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global).  Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used.  (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table.  This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get.  It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1.  A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;
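
/*
 * A worked example of the layout (illustrative values only; ToIndirectRef()
 * and ExtractIndex() in IndirectReferenceTable below are the authoritative
 * encoding): the kind occupies bits [1:0], the table index bits [17:2], and
 * the slot's serial number the bits above that.  A local reference to table
 * slot 5 whose slot serial is 3 would come out as:
 *
 *   uintptr_t bits = (3u << 20) | (5u << 2) | kLocal;  // 0x00300015
 *   IndirectRef iref = reinterpret_cast<IndirectRef>(bits);
 *   // GetIndirectRefKind(iref) == kLocal, ExtractIndex(iref) == 5
 */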

// Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress().
static mirror::Object* const kInvalidIndirectRefObject = reinterpret_cast<mirror::Object*>(0xdead4321);
static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast<mirror::Object*>(0xdead1234);

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal = 1,                 // <<local reference>>
  kGlobal = 2,                // <<global reference>>
  kWeakGlobal = 3             // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);

/*
 * Determine what kind of indirect reference this is.
 */
static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}

/*
 * Extended debugging structure.  We keep a parallel array of these, one
 * per slot in the table.
 */
static const size_t kIRTPrevCount = 4;
struct IndirectRefSlot {
  uint32_t serial;
  const mirror::Object* previous[kIRTPrevCount];
};

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;
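
/*
 * Sketch of how the parallel slot data catches stale references (the real
 * bookkeeping is UpdateSlotAdd() in IndirectReferenceTable below; "slot" is
 * a hypothetical pointer into that parallel array).  Every add to a slot
 * bumps its serial, so an IndirectRef minted against the old serial no
 * longer matches, and the ring of the last kIRTPrevCount occupants records
 * what a stale reference used to point at:
 *
 *   slot->serial++;                                      // old irefs now stale
 *   slot->previous[slot->serial % kIRTPrevCount] = obj;  // remember occupant
 */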

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry).  For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move.  If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes".  We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed.  Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment".  The top is managed internally, and
 * the bottom is passed in as a function argument.  When we call a native
 * method or push a local frame, the current top index gets pushed on, and
 * serves as the new bottom.  When we pop a frame off, the value from the
 * stack becomes the new top index, and the value stored in the previous
 * frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack.  Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot.  Instead of getting a table and doing a lookup,
 * the lookup can be done instantly.  Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance.  A large circular buffer might
 * reduce the amortized cost of adding global references.
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
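
/*
 * Illustrative cookie usage (a sketch with a hypothetical table "irt", not
 * actual call sites; the methods are declared on IndirectReferenceTable
 * below):
 *
 *   uint32_t cookie = irt.GetSegmentState();  // push: current top becomes bottom
 *   IndirectRef ref = irt.Add(cookie, obj);   // adds land in the new segment
 *   ...
 *   irt.Remove(cookie, ref);                  // only entries above the cookie's
 *                                             // bottom index can be removed
 *   irt.SetSegmentState(cookie);              // pop: discard the segment
 *
 * Because the cookie is just IRTSegmentState::all, restoring it restores
 * both the bottom index and the hole count in a single 32-bit store.
 */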

class IrtIterator {
 public:
  explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
      : table_(table), i_(i), capacity_(capacity) {
    SkipNullsAndTombstones();
  }

  IrtIterator& operator++() {
    ++i_;
    SkipNullsAndTombstones();
    return *this;
  }

  mirror::Object** operator*() {
    return &table_[i_];
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  void SkipNullsAndTombstones() {
    // We skip NULLs and tombstones.  Clients don't want to see implementation details.
    while (i_ < capacity_ && (table_[i_] == NULL || table_[i_] == kClearedJniWeakGlobal)) {
      ++i_;
    }
  }

  mirror::Object** const table_;
  size_t i_;
  size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}
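
/*
 * Iteration sketch (hypothetical table "irt"; begin()/end() are defined on
 * IndirectReferenceTable below).  Note that operator* yields a pointer to
 * the table slot rather than the object itself, so entries can be updated
 * in place:
 *
 *   for (IrtIterator it = irt.begin(), end = irt.end(); it != end; ++it) {
 *     mirror::Object** entry = *it;
 *     // *entry is neither NULL nor kClearedJniWeakGlobal here.
 *   }
 */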

class IndirectReferenceTable {
 public:
  IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind);

  ~IndirectReferenceTable();

  /*
   * Add a new entry.  "obj" must be a valid non-NULL object reference.
   *
   * Returns NULL if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                  IndirectRef iref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything.  This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);

  void AssertEmpty();

  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table.  This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset() {
    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
  }

 private:
  /*
   * Extract the table index from an indirect reference.
   */
  static uint32_t ExtractIndex(IndirectRef iref) {
    uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
    return (uref >> 2) & 0xffff;
  }

  /*
   * The object pointer itself is subject to relocation in some GC
   * implementations, so we shouldn't really be using it here.
   */
  IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const {
    DCHECK_LT(tableIndex, 65536U);
    uint32_t serialChunk = slot_data_[tableIndex].serial;
    uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
    return reinterpret_cast<IndirectRef>(uref);
  }

  /*
   * Update extended debug info when an entry is added.
   *
   * We advance the serial number, invalidating any outstanding references to
   * this slot.
   */
  void UpdateSlotAdd(const mirror::Object* obj, int slot) {
    if (slot_data_ != NULL) {
      IndirectRefSlot* pSlot = &slot_data_[slot];
      pSlot->serial++;
      pSlot->previous[pSlot->serial % kIRTPrevCount] = obj;
    }
  }

  // Abort if check_jni is not enabled.
  static void AbortIfNoCheckJNI();

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const;
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by jni down calls */
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Mem map where we store the extended debugging info.
  std::unique_ptr<MemMap> slot_mem_map_;
  // Bottom of the stack.  If this is a JNI weak global table, do not access
  // the object references stored here directly, as they are weak roots; use
  // Get(), which applies a read barrier.
  mirror::Object** table_;
  /* bit mask, ORed into all irefs */
  IndirectRefKind kind_;
  /* extended debugging info */
  IndirectRefSlot* slot_data_;
  /* #of entries we have space for */
  size_t alloc_entries_;
  /* max #of entries allowed */
  size_t max_entries_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_