1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "intern_table.h"
18
19#include <memory>
20
21#include "gc_root-inl.h"
22#include "gc/space/image_space.h"
23#include "mirror/dex_cache.h"
24#include "mirror/object_array-inl.h"
25#include "mirror/object-inl.h"
26#include "mirror/string-inl.h"
27#include "thread.h"
28#include "utf.h"
29
30namespace art {
31
// Interning starts out allowed; new_intern_condition_ is used to block
// threads in Insert() while interning is disallowed and is signalled by
// AllowNewInterns().
InternTable::InternTable()
    : image_added_to_intern_table_(false), log_new_roots_(false),
      allow_new_interns_(true),
      new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
}
37
38size_t InternTable::Size() const {
39  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
40  return strong_interns_.Size() + weak_interns_.Size();
41}
42
43size_t InternTable::StrongSize() const {
44  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
45  return strong_interns_.Size();
46}
47
48size_t InternTable::WeakSize() const {
49  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
50  return weak_interns_.Size();
51}
52
53void InternTable::DumpForSigQuit(std::ostream& os) const {
54  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
55}
56
// Visits strong intern roots according to |flags|. Weak interns are not
// visited here; they are processed by SweepInternTableWeaks() instead.
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    // Visit every strong intern (both pre- and post-zygote tables).
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    // Only visit interns recorded in the new-roots log (populated by
    // InsertStrong() while log_new_roots_ is set).
    for (auto& root : new_strong_intern_roots_) {
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
85
// Finds the strong intern equal to |s|, or returns nullptr. The caller must
// hold the intern table lock (asserted inside Table::Find).
mirror::String* InternTable::LookupStrong(mirror::String* s) {
  return strong_interns_.Find(s);
}
89
// Finds the weak intern equal to |s|, or returns nullptr. The caller must
// hold the intern table lock (asserted inside Table::Find).
mirror::String* InternTable::LookupWeak(mirror::String* s) {
  return weak_interns_.Find(s);
}
93
94void InternTable::SwapPostZygoteWithPreZygote() {
95  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
96  weak_interns_.SwapPostZygoteWithPreZygote();
97  strong_interns_.SwapPostZygoteWithPreZygote();
98}
99
// Adds |s| to the strong table and returns it. When a transaction is active,
// records the insertion so it can be rolled back; when root logging is
// enabled, also appends |s| to the new-roots log consumed by VisitRoots().
mirror::String* InternTable::InsertStrong(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}
111
// Adds |s| to the weak table and returns it. When a transaction is active,
// records the insertion so it can be rolled back.
mirror::String* InternTable::InsertWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}
120
// Removes |s| from the strong table. Note: unlike RemoveWeak(), no
// transaction recording is done here.
void InternTable::RemoveStrong(mirror::String* s) {
  strong_interns_.Remove(s);
}
124
// Removes |s| from the weak table, recording the removal for rollback when a
// transaction is active.
void InternTable::RemoveWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}
132
// Insert/remove methods used to undo changes made during an aborted transaction.
// Each asserts that no transaction is currently active, so the underlying
// helpers do not record the undo operation as a new transactional change.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}
150
// Populates the strong table with strings from |image_space|, at most once.
// Prefers the image's dedicated interned-strings section; falls back to
// walking every dex cache's resolved strings when the section is absent.
void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
  CHECK(image_space != nullptr);
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if (!image_added_to_intern_table_) {
    const ImageHeader* const header = &image_space->GetImageHeader();
    // Check if we have the interned strings section.
    const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
    if (section.Size() > 0) {
      // Fast path: deserialize the pre-built intern set directly.
      ReadFromMemoryLocked(image_space->Begin() + section.Offset());
    } else {
      // TODO: Delete this logic?
      mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
      mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
      for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
        mirror::DexCache* dex_cache = dex_caches->Get(i);
        const DexFile* dex_file = dex_cache->GetDexFile();
        const size_t num_strings = dex_file->NumStringIds();
        for (size_t j = 0; j < num_strings; ++j) {
          mirror::String* image_string = dex_cache->GetResolvedString(j);
          if (image_string != nullptr) {
            mirror::String* found = LookupStrong(image_string);
            if (found == nullptr) {
              InsertStrong(image_string);
            } else {
              // Any equal string already in the table should be this exact object.
              DCHECK_EQ(found, image_string);
            }
          }
        }
      }
    }
    image_added_to_intern_table_ = true;
  }
}
184
// Searches the boot image's dex caches for a resolved string equal to |s|.
// Returns nullptr if the image strings were already merged into the table,
// no image is present, or no matching resolved string exists.
mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (image_added_to_intern_table_) {
    // Image strings are already in strong_interns_; no need to search again.
    return nullptr;
  }
  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
  if (image == nullptr) {
    return nullptr;  // No image present.
  }
  mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
  mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
  const std::string utf8 = s->ToModifiedUtf8();
  for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
    mirror::DexCache* dex_cache = dex_caches->Get(i);
    const DexFile* dex_file = dex_cache->GetDexFile();
    // Binary search the dex file for the string index.
    const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
    if (string_id != nullptr) {
      uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
      // GetResolvedString() contains a RB.
      mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
      if (image_string != nullptr) {
        return image_string;
      }
    }
  }
  return nullptr;
}
213
214void InternTable::AllowNewInterns() {
215  Thread* self = Thread::Current();
216  MutexLock mu(self, *Locks::intern_table_lock_);
217  allow_new_interns_ = true;
218  new_intern_condition_.Broadcast(self);
219}
220
221void InternTable::DisallowNewInterns() {
222  Thread* self = Thread::Current();
223  MutexLock mu(self, *Locks::intern_table_lock_);
224  allow_new_interns_ = false;
225}
226
// Lock and unlock once to ensure that no threads are still in the
// middle of adding new interns. Also checks that interning really is
// disallowed at this point.
void InternTable::EnsureNewInternsDisallowed() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  CHECK(!allow_new_interns_);
}
233
// Core interning routine: returns the canonical string equal to |s|,
// inserting |s| (strong or weak per |is_strong|) if no match exists.
// Lookup order matters: strong table, then weak table (with promotion to
// strong if requested), then the boot image.
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  // Block while interning is disallowed (see DisallowNewInterns()).
  while (UNLIKELY(!allow_new_interns_)) {
    new_intern_condition_.WaitHoldingLocks(self);
  }
  // Check the strong table for a match.
  mirror::String* strong = LookupStrong(s);
  if (strong != nullptr) {
    return strong;
  }
  // There is no match in the strong table, check the weak table.
  mirror::String* weak = LookupWeak(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // Check the image for a match.
  mirror::String* image = LookupStringFromImage(s);
  if (image != nullptr) {
    return is_strong ? InsertStrong(image) : InsertWeak(image);
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}
266
267mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
268  DCHECK(utf8_data != nullptr);
269  return InternStrong(mirror::String::AllocFromModifiedUtf8(
270      Thread::Current(), utf16_length, utf8_data));
271}
272
273mirror::String* InternTable::InternStrong(const char* utf8_data) {
274  DCHECK(utf8_data != nullptr);
275  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
276}
277
// Interns |s| strongly, returning the canonical instance.
mirror::String* InternTable::InternStrong(mirror::String* s) {
  return Insert(s, true);
}
281
// Interns |s| weakly, returning the canonical instance.
mirror::String* InternTable::InternWeak(mirror::String* s) {
  return Insert(s, false);
}
285
286bool InternTable::ContainsWeak(mirror::String* s) {
287  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
288  return LookupWeak(s) == s;
289}
290
// GC entry point: sweeps the weak table, erasing or updating entries via
// |callback| (see Table::SweepWeaks).
void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(callback, arg);
}
295
296void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
297  const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
298      ImageHeader::kSectionInternedStrings);
299  // Read the string tables from the image.
300  const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
301  const size_t offset = ReadFromMemory(ptr);
302  CHECK_LE(offset, intern_section.Size());
303}
304
// Locking wrapper around ReadFromMemoryLocked(); returns bytes consumed.
size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return ReadFromMemoryLocked(ptr);
}
309
// Deserializes strong interns from |ptr| into the pre-zygote table; the
// caller must hold the intern table lock. Returns bytes consumed.
size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
  return strong_interns_.ReadIntoPreZygoteTable(ptr);
}
313
// Serializes the strong post-zygote table to |ptr|; returns bytes written.
size_t InternTable::WriteToMemory(uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.WriteFromPostZygoteTable(ptr);
}
318
319std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
320  if (kIsDebugBuild) {
321    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
322  }
323  return static_cast<size_t>(root.Read()->GetHashCode());
324}
325
326bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
327                                               const GcRoot<mirror::String>& b) const {
328  if (kIsDebugBuild) {
329    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
330  }
331  return a.Read()->Equals(b.Read());
332}
333
// Deserializes an UnorderedSet from |ptr| into the pre-zygote table, which
// must be empty, without copying the backing memory. Returns bytes read.
size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
  CHECK_EQ(pre_zygote_table_.Size(), 0u);
  size_t read_count = 0;
  pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
  return read_count;
}
340
// Serializes the post-zygote table to |ptr|; returns bytes written.
size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
  return post_zygote_table_.WriteToMemory(ptr);
}
344
345void InternTable::Table::Remove(mirror::String* s) {
346  auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
347  if (it != post_zygote_table_.end()) {
348    post_zygote_table_.Erase(it);
349  } else {
350    it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
351    DCHECK(it != pre_zygote_table_.end());
352    pre_zygote_table_.Erase(it);
353  }
354}
355
356mirror::String* InternTable::Table::Find(mirror::String* s) {
357  Locks::intern_table_lock_->AssertHeld(Thread::Current());
358  auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
359  if (it != pre_zygote_table_.end()) {
360    return it->Read();
361  }
362  it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
363  if (it != post_zygote_table_.end()) {
364    return it->Read();
365  }
366  return nullptr;
367}
368
// Moves the post-zygote interns into the pre-zygote slot, but only when the
// pre-zygote table is empty; otherwise leaves both tables untouched.
void InternTable::Table::SwapPostZygoteWithPreZygote() {
  if (pre_zygote_table_.Empty()) {
    std::swap(pre_zygote_table_, post_zygote_table_);
    // After the swap, pre_zygote_table_ holds the interns that were just moved.
    VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
  } else {
    // This case happens if we read the intern table from the image.
    VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
  }
}
378
// Always insert into the post zygote table; this gets swapped when we create
// the zygote to be the pre zygote table.
void InternTable::Table::Insert(mirror::String* s) {
  post_zygote_table_.Insert(GcRoot<mirror::String>(s));
}
384
385void InternTable::Table::VisitRoots(RootVisitor* visitor) {
386  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
387      visitor, RootInfo(kRootInternedString));
388  for (auto& intern : pre_zygote_table_) {
389    buffered_visitor.VisitRoot(intern);
390  }
391  for (auto& intern : post_zygote_table_) {
392    buffered_visitor.VisitRoot(intern);
393  }
394}
395
// Sweeps both generations, pre-zygote first (see the set overload below).
void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) {
  SweepWeaks(&pre_zygote_table_, callback, arg);
  SweepWeaks(&post_zygote_table_, callback, arg);
}
400
// Sweeps one set: for each entry, |callback| returns nullptr (unmarked, so
// the entry is erased) or the possibly-moved object (entry updated in place).
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = callback(object, arg);
    if (new_object == nullptr) {
      // Object is dead: drop the intern. Erase returns the next iterator.
      it = set->Erase(it);
    } else {
      // Object survived (and may have moved): store the updated reference.
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}
414
415size_t InternTable::Table::Size() const {
416  return pre_zygote_table_.Size() + post_zygote_table_.Size();
417}
418
419}  // namespace art
420