/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "utils.h"
#include "verify_object-inl.h"

#include <cstdlib>

namespace art {

// When true, Remove() dumps the calling thread's stack (at WARNING severity)
// if it is handed a handle-scope reference instead of a JNI local reference.
// Off by default to keep the log noise down; flip for debugging bad JNI usage.
static constexpr bool kDumpStackOnNonLocalReference = false;

// Helper that lets a T (which requires the mutator lock to dump) be streamed
// to an ostream via operator<< below, carrying the lock requirement through
// the annotalysis annotations. Holds a reference only; the wrapped value must
// outlive the dumpable.
template<typename T>
class MutatorLockedDumpable {
 public:
  explicit MutatorLockedDumpable(T& value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) {
  }

  // Delegates to the wrapped value's Dump(std::ostream&).
  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    value_.Dump(os);
  }

 private:
  T& value_;  // Non-owning; caller guarantees lifetime.

  DISALLOW_COPY_AND_ASSIGN(MutatorLockedDumpable);
};

// Stream-insertion for MutatorLockedDumpable so it can appear directly in
// LOG(...) expressions (see the overflow/abort paths below).
template<typename T>
std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis
//       currently fails for this.
    NO_THREAD_SAFETY_ANALYSIS {
  rhs.Dump(os);
  return os;
}

// Aborts the runtime with a generic message unless CheckJNI is enabled, in
// which case we do nothing here: CheckJNI's own checks will have produced (or
// will produce) a more detailed diagnostic before aborting.
void IndirectReferenceTable::AbortIfNoCheckJNI() {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << "JNI ERROR (app bug): see above.";
  }
}

// Constructs a table backed by an anonymous read/write memory mapping sized
// for maxCount IrtEntry slots. initialCount is validated (must be in
// (0, maxCount]) but the full maxCount bytes are mapped up front; physical
// pages are only touched as entries are added, and Trim() releases unused
// tail pages.
//
// On mapping failure: if abort_on_error, the CHECKs below abort the runtime;
// otherwise the table is left in an invalid state (table_mem_map_ == nullptr,
// observable via IsValid()) and an ERROR is logged.
IndirectReferenceTable::IndirectReferenceTable(size_t initialCount,
                                               size_t maxCount, IndirectRefKind desiredKind,
                                               bool abort_on_error)
    : kind_(desiredKind),
      max_entries_(maxCount) {
  CHECK_GT(initialCount, 0U);
  CHECK_LE(initialCount, maxCount);
  // kHandleScopeOrInvalid is a sentinel kind, never a real table kind.
  CHECK_NE(desiredKind, kHandleScopeOrInvalid);

  std::string error_str;
  const size_t table_bytes = maxCount * sizeof(IrtEntry);
  table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
                                            PROT_READ | PROT_WRITE, false, false, &error_str));
  if (abort_on_error) {
    CHECK(table_mem_map_.get() != nullptr) << error_str;
    CHECK_EQ(table_mem_map_->Size(), table_bytes);
    CHECK(table_mem_map_->Begin() != nullptr);
  } else if (table_mem_map_.get() == nullptr ||
             table_mem_map_->Size() != table_bytes ||
             table_mem_map_->Begin() == nullptr) {
    // Leave the table invalid; IsValid() will report false.
    table_mem_map_.reset();
    LOG(ERROR) << error_str;
    return;
  }
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
  segment_state_.all = IRT_FIRST_SEGMENT;
}

// The MemMap member releases the backing mapping; nothing else to do.
IndirectReferenceTable::~IndirectReferenceTable() {
}

// Returns true if construction succeeded (the backing mapping exists). Only
// meaningful when the table was constructed with abort_on_error == false.
bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.get() != nullptr;
}

// Adds "obj" to the current segment and returns the opaque IndirectRef for
// it. "cookie" is the serialized segment state captured when the segment was
// pushed (prevState below); it bounds how far back the hole search may go.
// Aborts (LOG(FATAL)) on table overflow, dumping the whole table.
IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
  IRTSegmentState prevState;
  prevState.all = cookie;
  size_t topIndex = segment_state_.parts.topIndex;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);
  // Holes can only have been created since the cookie was taken, never before.
  DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);

  if (topIndex == max_entries_) {
    LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
               << "(max=" << max_entries_ << ")\n"
               << MutatorLockedDumpable<IndirectReferenceTable>(*this);
  }

  // We know there's enough room in the table.  Now we just need to find
  // the right spot.  If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  // Holes created in *this* segment only; holes in parent segments are not
  // ours to fill.
  int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
  size_t index;
  if (numHoles > 0) {
    DCHECK_GT(topIndex, 1U);
    // Find the first hole; likely to be near the end of the list.
    // The slot at topIndex-1 is never a hole (Remove() pops trailing holes),
    // so start the downward scan one below it.
    IrtEntry* pScan = &table_[topIndex - 1];
    DCHECK(!pScan->GetReference()->IsNull());
    --pScan;
    while (!pScan->GetReference()->IsNull()) {
      // Must not scan below the bottom of the current segment.
      DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
      --pScan;
    }
    index = pScan - table_;
    segment_state_.parts.numHoles--;
  } else {
    // Add to the end.
    index = topIndex++;
    segment_state_.parts.topIndex = topIndex;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if ((false)) {  // Debug-only trace; flip to true locally when diagnosing.
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
              << " holes=" << segment_state_.parts.numHoles;
  }

  DCHECK(result != nullptr);
  return result;
}

// Aborts (with a full table dump) if any slot up to the current top is still
// populated. Used to verify all local references were released.
void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      ScopedObjectAccess soa(Thread::Current());
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
    }
  }
}

// Removes an object.  We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything.  This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
  IRTSegmentState prevState;
  prevState.all = cookie;
  // Valid removal range for this segment is [bottomIndex, topIndex).
  int topIndex = segment_state_.parts.topIndex;
  int bottomIndex = prevState.parts.topIndex;

  DCHECK(table_ != nullptr);
  DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);

  if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
    // Not one of our references. If it lives in the thread's handle scope the
    // app passed a raw stack reference where a JNI local was expected; with
    // CheckJNI enabled we warn (and optionally dump the stack), then pretend
    // the removal succeeded so the caller doesn't also report an error.
    auto* self = Thread::Current();
    if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->check_jni) {
        ScopedObjectAccess soa(self);
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG(WARNING));
        }
      }
      return true;
    }
  }
  const int idx = ExtractIndex(iref);
  if (idx < bottomIndex) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottomIndex << "-" << topIndex << ")";
    return false;
  }
  if (idx >= topIndex) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottomIndex << " top=" << topIndex << ")";
    return false;
  }

  if (idx == topIndex - 1) {
    // Top-most entry.  Scan up and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    // Only holes belonging to this segment may be consumed.
    int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
    if (numHoles != 0) {
      // Pop the removed entry plus any holes now exposed at the new top, so
      // the slot just below the top is always non-null (Add() relies on this).
      while (--topIndex > bottomIndex && numHoles != 0) {
        if ((false)) {  // Debug-only trace.
          LOG(INFO) << "+++ checking for hole at " << topIndex - 1
                    << " (cookie=" << cookie << ") val="
                    << table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[topIndex - 1].GetReference()->IsNull()) {
          break;
        }
        if ((false)) {  // Debug-only trace.
          LOG(INFO) << "+++ ate hole at " << (topIndex - 1);
        }
        numHoles--;
      }
      segment_state_.parts.numHoles = numHoles + prevState.parts.numHoles;
      segment_state_.parts.topIndex = topIndex;
    } else {
      // No holes to consume; just pop the removed entry.
      segment_state_.parts.topIndex = topIndex-1;
      if ((false)) {  // Debug-only trace.
        LOG(INFO) << "+++ ate last entry " << topIndex - 1;
      }
    }
  } else {
    // Not the top-most entry.  This creates a hole.  We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    segment_state_.parts.numHoles++;
    if ((false)) {  // Debug-only trace.
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
    }
  }

  return true;
}

// Advises the kernel that the pages past the current top of the table are no
// longer needed (MADV_DONTNEED), releasing their physical memory while
// keeping the mapping itself intact. Rounds the start up to a page boundary
// so live entries are never touched.
void IndirectReferenceTable::Trim() {
  const size_t top_index = Capacity();
  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = table_mem_map_->End();
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

// Reports every live (non-null) entry as a GC root. Holes are skipped.
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      // Visiting a root may update it (e.g. moving GC) but never nulls it.
      DCHECK(!ref->IsNull());
    }
  }
}

// Dumps the live entries via ReferenceTable::Dump. The first read skips the
// read barrier purely to test for null cheaply; non-null entries are then
// re-read through the barrier so the dumped pointers are up to date.
void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    mirror::Object* obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

}  // namespace art