indirect_reference_table.h revision dc061d038e4e48fe2a967fd4a9c200d112df5698
/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <string>

#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

class MemMap;

/*
 * Maintain a table of indirect references. Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack. For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns. Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 * - adding or removing a segment
 * - adding references to a new segment
 * - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 * - adding references to a "mature" segment
 * - removing individual references
 * - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space. We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * Only SynchronizedGet is synchronized.
 */
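/*
 * A minimal sketch of the segment discipline described above (illustrative
 * only, not part of this header): "irt" and "obj" are placeholders, the
 * caller is assumed to hold the mutator lock, and the API is the
 * IndirectReferenceTable declared below.
 *
 *   uint32_t cookie = irt.GetSegmentState();  // push: current top becomes the new bottom
 *   IndirectRef ref = irt.Add(cookie, obj);   // must land in the new (current) segment
 *   ObjPtr<mirror::Object> o = irt.Get(ref);  // O(1) conversion back to the object
 *   irt.Remove(cookie, ref);                  // only succeeds within the current segment
 *   irt.SetSegmentState(cookie);              // pop: strips the whole segment at once
 */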
/*
 * Indirect reference definition. This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global). Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used. (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table. This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get. It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1. A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal                = 1,  // <<local reference>>
  kGlobal               = 2,  // <<global reference>>
  kWeakGlobal           = 3,  // <<weak global reference>>
  kLastKind             = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;
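/*
 * Worked encoding example (illustrative only; the exact bit layout is
 * private to IndirectReferenceTable, declared below). On a release build
 * the serial number needs 2 bits (kIRTPrevCount == 3), so an entry at
 * table index 5 with serial 1 and kind kGlobal encodes as
 *
 *   (5 << 4) | (1 << 2) | 2 == 0x56
 *
 * Because the kind occupies the low bits and zero is reserved as invalid,
 * a null jobject decodes to kHandleScopeOrInvalid, and the kind of any
 * indirect reference can be recovered without consulting the table:
 *
 *   IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(iref);
 */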
/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry). For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move. If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes". We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed. Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment". The top is managed internally, and
 * the bottom is passed in as a function argument. When we call a native
 * method or push a local frame, the current top index gets pushed on,
 * and serves as the new bottom. When we pop a frame off, the value from
 * the stack becomes the new top index, and the value stored in the
 * previous frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack. Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot. Instead of getting a table and doing a lookup,
 * the lookup can be done instantly. Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance. A large circular buffer might
 * reduce the amortized cost of adding global references.
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
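/*
 * Worked cookie example (illustrative values; the bitfield layout shown
 * assumes the usual little-endian targets, where topIndex occupies the low
 * half of "all"). With six entries used and holes at indices 2 and 4:
 *
 *   IRTSegmentState state;
 *   state.parts.topIndex = 6;
 *   state.parts.numHoles = 2;
 *   // state.all == 0x00020006: one 32-bit value to push per native frame
 *
 * Removing the top-most entry (index 5) also strips the hole at index 4
 * immediately below it, leaving topIndex == 4 and numHoles == 1.
 */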
// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// An entry contains multiple reference slots but only one is active; this
// helps us detect use-after-free errors, since the serial stored in a stale
// indirect ref won't match.
static constexpr size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;

class IrtEntry {
 public:
  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Object>* GetReference() {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  uint32_t GetSerial() const {
    return serial_;
  }

  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  uint32_t serial_;
  GcRoot<mirror::Object> references_[kIRTPrevCount];
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
              "Unexpected sizeof(IrtEntry)");
static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
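// Worked size check, following from the static_asserts above: a
// GcRoot<mirror::Object> is a single 32-bit compressed reference, so
// sizeof(IrtEntry) == (1 + kIRTPrevCount) * 4, i.e. (1 + 3) * 4 == 16 bytes
// in release builds and (1 + 7) * 4 == 32 bytes in debug builds. Both are
// powers of two, which keeps entry indexing a simple shift.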
class IrtIterator {
 public:
  IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
  }

  IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
    ++i_;
    return *this;
  }

  GcRoot<mirror::Object>* operator*() REQUIRES_SHARED(Locks::mutator_lock_) {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].GetReference();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  IrtEntry* const table_;
  size_t i_;
  const size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}

class IndirectReferenceTable {
 public:
  /*
   * WARNING: Construction of the IndirectReferenceTable may fail.
   * error_msg must not be null. If error_msg is set by the constructor, then
   * construction has failed and the IndirectReferenceTable will be in an
   * invalid state. Use IsValid to check whether the object is in an invalid
   * state.
   */
  IndirectReferenceTable(size_t max_count, IndirectRefKind kind, std::string* error_msg);

  ~IndirectReferenceTable();

  /*
   * Checks whether construction of the IndirectReferenceTable succeeded.
   *
   * This object must only be used if IsValid() returns true. It is safe to
   * call IsValid from multiple threads without locking or other explicit
   * synchronization.
   */
  bool IsValid() const;

  /*
   * Add a new entry. "obj" must be a valid non-nullptr object reference.
   *
   * Returns nullptr if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> SynchronizedGet(IndirectRef iref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  /*
   * Update an existing entry.
   *
   * Updates an existing indirect reference to point to a new object.
   */
  void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything. This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);

  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table. This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  // Note: IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }
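  // Iteration sketch (illustrative; "irt" is a placeholder table, the
  // mutator lock is assumed held, and roots are returned without a read
  // barrier, per the note above):
  //
  //   for (GcRoot<mirror::Object>* root : irt) {
  //     // visit *root as a GC root
  //   }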
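  // GC interaction sketch (illustrative; "visitor" stands for whatever
  // RootVisitor the collector supplies, here for a global reference table):
  //
  //   irt.VisitRoots(visitor, RootInfo(kRootJNIGlobal));
  //   irt.Trim();  // release pages past the current top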
  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
    // jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
    // is not pointer-size-safe.
    return Offset(0);
  }

  // Release pages past the end of the table that may have previously held references.
  void Trim() REQUIRES_SHARED(Locks::mutator_lock_);

  // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
  ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
    return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
  }

 private:
  static constexpr size_t kSerialBits = MinimumBitsToStore(kIRTPrevCount);
  static constexpr uint32_t kShiftedSerialMask = (1u << kSerialBits) - 1;

  static constexpr size_t kKindBits = MinimumBitsToStore(
      static_cast<uint32_t>(IndirectRefKind::kLastKind));
  static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;

  static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
    static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
    DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kSerialBits - kKindBits);
    return (static_cast<uintptr_t>(table_index) << kKindBits << kSerialBits);
  }
  static constexpr uint32_t DecodeIndex(uintptr_t uref) {
    return static_cast<uint32_t>((uref >> kKindBits) >> kSerialBits);
  }

  static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
    return static_cast<uintptr_t>(kind);
  }
  static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
    return static_cast<IndirectRefKind>(uref & kKindMask);
  }

  static constexpr uintptr_t EncodeSerial(uint32_t serial) {
    DCHECK_LE(MinimumBitsToStore(serial), kSerialBits);
    return serial << kKindBits;
  }
  static constexpr uint32_t DecodeSerial(uintptr_t uref) {
    return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
  }

  constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
    DCHECK_LT(table_index, max_entries_);
    return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kind_);
  }

  static void ConstexprChecks();

  // Extract the table index from an indirect reference.
  ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
    return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
  }

  IndirectRef ToIndirectRef(uint32_t table_index) const {
    DCHECK_LT(table_index, max_entries_);
    uint32_t serial = table_[table_index].GetSerial();
    return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
  }

  // Abort if check_jni is not enabled. Otherwise, just log as an error.
  static void AbortIfNoCheckJNI(const std::string& msg);

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by jni down calls */
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Bottom of the stack. Do not directly access the object references
  // in this as they are roots. Use Get(), which has a read barrier.
  IrtEntry* table_;
  /* bit mask, ORed into all irefs */
  const IndirectRefKind kind_;
  /* max #of entries allowed */
  const size_t max_entries_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_