intern_table.cc revision a91a4bc1f8960f64c5f7e4616d46e21b8e1bfba2
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "intern_table.h"
18
19#include <memory>
20
21#include "gc/space/image_space.h"
22#include "mirror/dex_cache.h"
23#include "mirror/object_array-inl.h"
24#include "mirror/object-inl.h"
25#include "mirror/string.h"
26#include "thread.h"
27#include "utf.h"
28
29namespace art {
30
31InternTable::InternTable()
32    : log_new_roots_(false), allow_new_interns_(true),
33      new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
34}
35
36size_t InternTable::Size() const {
37  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
38  return strong_interns_.size() + weak_interns_.size();
39}
40
41size_t InternTable::StrongSize() const {
42  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
43  return strong_interns_.size();
44}
45
46size_t InternTable::WeakSize() const {
47  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
48  return weak_interns_.size();
49}
50
51void InternTable::DumpForSigQuit(std::ostream& os) const {
52  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
53  os << "Intern table: " << strong_interns_.size() << " strong; "
54     << weak_interns_.size() << " weak\n";
55}
56
// Reports intern-table roots to the GC via |callback|. With
// kVisitRootFlagAllRoots every strong intern is visited; with
// kVisitRootFlagNewRoots only interns logged since logging was enabled are
// visited. Other flags clear the log or toggle logging. The callback may
// update each root in place (e.g. a moving collector).
void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    for (auto& strong_intern : strong_interns_) {
      callback(reinterpret_cast<mirror::Object**>(&strong_intern.second), arg, 0,
               kRootInternedString);
      DCHECK(strong_intern.second != nullptr);
    }
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& pair : new_strong_intern_roots_) {
       // Remember the pre-visit value so we can detect whether the callback
       // moved the object.
       mirror::String* old_ref = pair.second;
       callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootInternedString);
       if (UNLIKELY(pair.second != old_ref)) {
         // Uh ohes, GC moved a root in the log. Need to search the strong interns and update the
         // corresponding object. This is slow, but luckily for us, this may only happen with a
         // concurrent moving GC.
         for (auto it = strong_interns_.lower_bound(pair.first), end = strong_interns_.end();
             it != end && it->first == pair.first; ++it) {
           // If the class stored matches the old class, update it to the new value.
           if (old_ref == it->second) {
             it->second = pair.second;
           }
         }
       }
     }
  }

  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
94
95mirror::String* InternTable::LookupStrong(mirror::String* s, int32_t hash_code) {
96  return Lookup(&strong_interns_, s, hash_code);
97}
98
99mirror::String* InternTable::LookupWeak(mirror::String* s, int32_t hash_code) {
100  // Weak interns need a read barrier because they are weak roots.
101  return Lookup(&weak_interns_, s, hash_code);
102}
103
104mirror::String* InternTable::Lookup(Table* table, mirror::String* s, int32_t hash_code) {
105  Locks::intern_table_lock_->AssertHeld(Thread::Current());
106  for (auto it = table->lower_bound(hash_code), end = table->end();
107       it != end && it->first == hash_code; ++it) {
108    mirror::String* existing_string;
109    mirror::String** root = &it->second;
110    existing_string = ReadBarrier::BarrierForRoot<mirror::String, kWithReadBarrier>(root);
111    if (existing_string->Equals(s)) {
112      return existing_string;
113    }
114  }
115  return NULL;
116}
117
118mirror::String* InternTable::InsertStrong(mirror::String* s, int32_t hash_code) {
119  Runtime* runtime = Runtime::Current();
120  if (runtime->IsActiveTransaction()) {
121    runtime->RecordStrongStringInsertion(s, hash_code);
122  }
123  if (log_new_roots_) {
124    new_strong_intern_roots_.push_back(std::make_pair(hash_code, s));
125  }
126  strong_interns_.insert(std::make_pair(hash_code, s));
127  return s;
128}
129
130mirror::String* InternTable::InsertWeak(mirror::String* s, int32_t hash_code) {
131  Runtime* runtime = Runtime::Current();
132  if (runtime->IsActiveTransaction()) {
133    runtime->RecordWeakStringInsertion(s, hash_code);
134  }
135  weak_interns_.insert(std::make_pair(hash_code, s));
136  return s;
137}
138
139void InternTable::RemoveStrong(mirror::String* s, int32_t hash_code) {
140  Remove(&strong_interns_, s, hash_code);
141}
142
143void InternTable::RemoveWeak(mirror::String* s, int32_t hash_code) {
144  Runtime* runtime = Runtime::Current();
145  if (runtime->IsActiveTransaction()) {
146    runtime->RecordWeakStringRemoval(s, hash_code);
147  }
148  Remove(&weak_interns_, s, hash_code);
149}
150
151void InternTable::Remove(Table* table, mirror::String* s, int32_t hash_code) {
152  for (auto it = table->lower_bound(hash_code), end = table->end();
153       it != end && it->first == hash_code; ++it) {
154    mirror::String* existing_string;
155    mirror::String** root = &it->second;
156    existing_string = ReadBarrier::BarrierForRoot<mirror::String, kWithReadBarrier>(root);
157    if (existing_string == s) {
158      table->erase(it);
159      return;
160    }
161  }
162}
163
164// Insert/remove methods used to undo changes made during an aborted transaction.
165mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s, int32_t hash_code) {
166  DCHECK(!Runtime::Current()->IsActiveTransaction());
167  return InsertStrong(s, hash_code);
168}
169mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s, int32_t hash_code) {
170  DCHECK(!Runtime::Current()->IsActiveTransaction());
171  return InsertWeak(s, hash_code);
172}
173void InternTable::RemoveStrongFromTransaction(mirror::String* s, int32_t hash_code) {
174  DCHECK(!Runtime::Current()->IsActiveTransaction());
175  RemoveStrong(s, hash_code);
176}
177void InternTable::RemoveWeakFromTransaction(mirror::String* s, int32_t hash_code) {
178  DCHECK(!Runtime::Current()->IsActiveTransaction());
179  RemoveWeak(s, hash_code);
180}
181
182static mirror::String* LookupStringFromImage(mirror::String* s)
183    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
184  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
185  if (image == NULL) {
186    return NULL;  // No image present.
187  }
188  mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
189  mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
190  const std::string utf8 = s->ToModifiedUtf8();
191  for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
192    mirror::DexCache* dex_cache = dex_caches->Get(i);
193    const DexFile* dex_file = dex_cache->GetDexFile();
194    // Binary search the dex file for the string index.
195    const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
196    if (string_id != NULL) {
197      uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
198      mirror::String* image = dex_cache->GetResolvedString(string_idx);
199      if (image != NULL) {
200        return image;
201      }
202    }
203  }
204  return NULL;
205}
206
207void InternTable::AllowNewInterns() {
208  Thread* self = Thread::Current();
209  MutexLock mu(self, *Locks::intern_table_lock_);
210  allow_new_interns_ = true;
211  new_intern_condition_.Broadcast(self);
212}
213
214void InternTable::DisallowNewInterns() {
215  Thread* self = Thread::Current();
216  MutexLock mu(self, *Locks::intern_table_lock_);
217  allow_new_interns_ = false;
218}
219
220mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
221  Thread* self = Thread::Current();
222  MutexLock mu(self, *Locks::intern_table_lock_);
223
224  DCHECK(s != NULL);
225  uint32_t hash_code = s->GetHashCode();
226
227  while (UNLIKELY(!allow_new_interns_)) {
228    new_intern_condition_.WaitHoldingLocks(self);
229  }
230
231  if (is_strong) {
232    // Check the strong table for a match.
233    mirror::String* strong = LookupStrong(s, hash_code);
234    if (strong != NULL) {
235      return strong;
236    }
237
238    // Check the image for a match.
239    mirror::String* image = LookupStringFromImage(s);
240    if (image != NULL) {
241      return InsertStrong(image, hash_code);
242    }
243
244    // There is no match in the strong table, check the weak table.
245    mirror::String* weak = LookupWeak(s, hash_code);
246    if (weak != NULL) {
247      // A match was found in the weak table. Promote to the strong table.
248      RemoveWeak(weak, hash_code);
249      return InsertStrong(weak, hash_code);
250    }
251
252    // No match in the strong table or the weak table. Insert into the strong
253    // table.
254    return InsertStrong(s, hash_code);
255  }
256
257  // Check the strong table for a match.
258  mirror::String* strong = LookupStrong(s, hash_code);
259  if (strong != NULL) {
260    return strong;
261  }
262  // Check the image for a match.
263  mirror::String* image = LookupStringFromImage(s);
264  if (image != NULL) {
265    return InsertWeak(image, hash_code);
266  }
267  // Check the weak table for a match.
268  mirror::String* weak = LookupWeak(s, hash_code);
269  if (weak != NULL) {
270    return weak;
271  }
272  // Insert into the weak table.
273  return InsertWeak(s, hash_code);
274}
275
276mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
277  DCHECK(utf8_data != nullptr);
278  return InternStrong(mirror::String::AllocFromModifiedUtf8(
279      Thread::Current(), utf16_length, utf8_data));
280}
281
282mirror::String* InternTable::InternStrong(const char* utf8_data) {
283  DCHECK(utf8_data != nullptr);
284  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
285}
286
287mirror::String* InternTable::InternStrong(mirror::String* s) {
288  if (s == nullptr) {
289    return nullptr;
290  }
291  return Insert(s, true);
292}
293
294mirror::String* InternTable::InternWeak(mirror::String* s) {
295  if (s == nullptr) {
296    return nullptr;
297  }
298  return Insert(s, false);
299}
300
301bool InternTable::ContainsWeak(mirror::String* s) {
302  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
303  const mirror::String* found = LookupWeak(s, s->GetHashCode());
304  return found == s;
305}
306
// Called by the GC to sweep the weak intern table: |callback| reports each
// object's fate — entries whose objects are unmarked (callback returns null)
// are erased, and surviving entries are updated to the (possibly moved) new
// address.
void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->second;
    mirror::Object* new_object = callback(object, arg);
    if (new_object == nullptr) {
      // Post-increment keeps a valid iterator past the erased node.
      // TODO: use it = weak_interns_.erase(it) when we get a c++11 stl.
      weak_interns_.erase(it++);
    } else {
      it->second = down_cast<mirror::String*>(new_object);
      ++it;
    }
  }
}
322
323}  // namespace art
324