/* Copyright (C) 2017 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h.  The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_

#include "jvmti_weak_table.h"

#include <string.h>  // For memcpy() in ReleasableContainer::Resize().

#include <limits>

#include "art_jvmti.h"
#include "base/logging.h"
#include "gc/allocation_listener.h"
#include "instrumentation.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
#include "mirror/class.h"
#include "mirror/object.h"
#include "runtime.h"
#include "ScopedLocalRef.h"

namespace openjdkjvmti {

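// Lock/Unlock/AssertLocked simply forward to allow_disallow_lock_, the lock
// that guards all accesses to the tagged-object table.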
template <typename T>
void JvmtiWeakTable<T>::Lock() {
  allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
}
template <typename T>
void JvmtiWeakTable<T>::Unlock() {
  allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
}
template <typename T>
void JvmtiWeakTable<T>::AssertLocked() {
  allow_disallow_lock_.AssertHeld(art::Thread::Current());
}

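// Read every entry in the table through a read barrier, replacing stale
// from-space references with their to-space equivalents. The flag records
// that this has been done for the current marking phase, so callers can
// avoid repeating this linear pass.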
template <typename T>
void JvmtiWeakTable<T>::UpdateTableWithReadBarrier() {
  update_since_last_sweep_ = true;

  auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
                                    art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
     REQUIRES_SHARED(art::Locks::mutator_lock_) {
    return original_root.Read<art::kWithReadBarrier>();
  };

  UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
}

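// Slow path for GetTag: the fast lookup missed, possibly because the table
// still holds a from-space pointer for the object.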
template <typename T>
bool JvmtiWeakTable<T>::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, T* result) {
  // Under concurrent GC, there is a window between moving objects and sweeping of system
  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
  // but still have from-space pointers in the table. Explicitly update the table once.
  // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
  UpdateTableWithReadBarrier();
  return GetTagLocked(self, obj, result);
}

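// Remove the tag mapping for |obj|, if any. Returns true and stores the old
// tag through |tag| (when non-null) if a mapping existed. The unsuffixed
// version acquires the lock itself; the Locked variant asserts that the
// caller already holds it. Both wait until weak access is allowed.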
template <typename T>
bool JvmtiWeakTable<T>::Remove(art::mirror::Object* obj, /* out */ T* tag) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  return RemoveLocked(self, obj, tag);
}
template <typename T>
bool JvmtiWeakTable<T>::RemoveLocked(art::mirror::Object* obj, T* tag) {
  art::Thread* self = art::Thread::Current();
  allow_disallow_lock_.AssertHeld(self);
  Wait(self);

  return RemoveLocked(self, obj, tag);
}

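// Core removal, with the lock held and weak access known to be allowed.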
template <typename T>
bool JvmtiWeakTable<T>::RemoveLocked(art::Thread* self, art::mirror::Object* obj, T* tag) {
  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
  if (it != tagged_objects_.end()) {
    if (tag != nullptr) {
      *tag = it->second;
    }
    tagged_objects_.erase(it);
    return true;
  }

  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
    // Under concurrent GC, there is a window between moving objects and sweeping of system
    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
    // but still have from-space pointers in the table. Explicitly update the table once.
    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.

    // Update the table.
    UpdateTableWithReadBarrier();

    // And try again.
    return RemoveLocked(self, obj, tag);
  }

  // Not in here.
  return false;
}

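// Set the tag for |obj|, overwriting any existing tag. Returns true if an
// existing entry was updated, false if a new mapping was inserted.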
template <typename T>
bool JvmtiWeakTable<T>::Set(art::mirror::Object* obj, T new_tag) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  return SetLocked(self, obj, new_tag);
}
template <typename T>
bool JvmtiWeakTable<T>::SetLocked(art::mirror::Object* obj, T new_tag) {
  art::Thread* self = art::Thread::Current();
  allow_disallow_lock_.AssertHeld(self);
  Wait(self);

  return SetLocked(self, obj, new_tag);
}

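// Core update, with the lock held and weak access known to be allowed.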
template <typename T>
bool JvmtiWeakTable<T>::SetLocked(art::Thread* self, art::mirror::Object* obj, T new_tag) {
  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
  if (it != tagged_objects_.end()) {
    it->second = new_tag;
    return true;
  }

  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
    // Under concurrent GC, there is a window between moving objects and sweeping of system
    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
    // but still have from-space pointers in the table. Explicitly update the table once.
    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.

    // Update the table.
    UpdateTableWithReadBarrier();

    // And try again.
    return SetLocked(self, obj, new_tag);
  }

  // New element.
  auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
  DCHECK(insert_it.second);
  return false;
}

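// Called as part of GC sweeping of system weaks: drops or updates entries
// whose objects have died or moved. Dispatches on whether the concrete table
// wants to be told about dead objects (see HandleNullSweep).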
template <typename T>
void JvmtiWeakTable<T>::Sweep(art::IsMarkedVisitor* visitor) {
  if (DoesHandleNullOnSweep()) {
    SweepImpl<true>(visitor);
  } else {
    SweepImpl<false>(visitor);
  }

  // Under concurrent GC, there is a window between moving objects and sweeping of system
  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
  // but still have from-space pointers in the table. We explicitly update the table then
  // to ensure we compare against to-space pointers. But we want to do this only once. Once
  // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
  // so we re-enable the explicit update for the next marking.
  update_since_last_sweep_ = false;
}

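// Maps every entry through IsMarked: live objects are updated to their
// (possibly moved) location, while dead ones come back as null and are
// removed, optionally with a HandleNullSweep notification.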
template <typename T>
template <bool kHandleNull>
void JvmtiWeakTable<T>::SweepImpl(art::IsMarkedVisitor* visitor) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);

  auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
                             art::mirror::Object* original_obj) {
    return visitor->IsMarked(original_obj);
  };

  UpdateTableWith<decltype(IsMarkedUpdater),
                  kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
}

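// Generic single-pass rewrite of the table. |updater| maps each entry's
// current object to its replacement; kTargetNull selects what happens when
// the replacement is null: keep the entry (kIgnoreNull), silently erase it
// (kRemoveNull), or erase it and call HandleNullSweep (kCallHandleNull).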
template <typename T>
template <typename Updater, typename JvmtiWeakTable<T>::TableUpdateNullTarget kTargetNull>
ALWAYS_INLINE inline void JvmtiWeakTable<T>::UpdateTableWith(Updater& updater) {
  // We optimistically hope that elements will still be well-distributed when re-inserting them.
  // So play with the map mechanics, and postpone rehashing. This avoids the need of a side
  // vector and two passes.
  float original_max_load_factor = tagged_objects_.max_load_factor();
  tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
  // For checking that a max load-factor actually does what we expect.
  size_t original_bucket_count = tagged_objects_.bucket_count();

  for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
    DCHECK(!it->first.IsNull());
    art::mirror::Object* original_obj = it->first.template Read<art::kWithoutReadBarrier>();
    art::mirror::Object* target_obj = updater(it->first, original_obj);
    if (original_obj != target_obj) {
      if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
        // Ignore null target, don't do anything.
      } else {
        T tag = it->second;
        it = tagged_objects_.erase(it);
        if (target_obj != nullptr) {
          tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
          DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
        } else if (kTargetNull == kCallHandleNull) {
          HandleNullSweep(tag);
        }
        continue;  // Iterator was implicitly updated by erase.
      }
    }
    ++it;
  }

  tagged_objects_.max_load_factor(original_max_load_factor);
  // TODO: consider rehash here.
}

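// A minimal, grow-only buffer over the JVMTI allocator. Its backing array can
// be detached with Release(), which transfers ownership to the caller; this
// is how GetTaggedObjects hands out result arrays that agents later free via
// jvmtiEnv::Deallocate.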
template <typename T>
template <typename Storage, class Allocator>
struct JvmtiWeakTable<T>::ReleasableContainer {
  using allocator_type = Allocator;

  explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
      : allocator(alloc),
        data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
        size(0),
        capacity(reserve) {
  }

  ~ReleasableContainer() {
    if (data != nullptr) {
      allocator.deallocate(data, capacity);
      capacity = 0;
      size = 0;
    }
  }

  Storage* Release() {
    Storage* tmp = data;

    data = nullptr;
    size = 0;
    capacity = 0;

    return tmp;
  }

  void Resize(size_t new_capacity) {
    CHECK_GT(new_capacity, capacity);

    Storage* tmp = allocator.allocate(new_capacity);
    DCHECK(tmp != nullptr);
    if (data != nullptr) {
      memcpy(tmp, data, sizeof(Storage) * size);
    }
    Storage* old = data;
    data = tmp;
    allocator.deallocate(old, capacity);
    capacity = new_capacity;
  }

  void Pushback(const Storage& elem) {
    if (size == capacity) {
      size_t new_capacity = 2 * capacity + 1;
      Resize(new_capacity);
    }
    data[size++] = elem;
  }

  Allocator allocator;
  Storage* data;
  size_t size;
  size_t capacity;
};

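// Backs the JVMTI GetObjectsWithTags call: collects all live objects whose
// tag matches one of the |tag_count| values in |tags|, or every tagged object
// when |tag_count| is zero. The object and tag output arrays are optional and
// are allocated with the JVMTI allocator, so the caller owns them afterwards.
//
// A minimal agent-side usage sketch (hypothetical tag value, error handling
// elided), assuming T = jlong as in the standard tag table:
//
//   jlong wanted = 42;  // Some tag previously applied via SetTag.
//   jint count;
//   jobject* objects;
//   jlong* found_tags;
//   jvmti_env->GetObjectsWithTags(1, &wanted, &count, &objects, &found_tags);
//   // ... use the results, then free both arrays:
//   jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(objects));
//   jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(found_tags));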
template <typename T>
jvmtiError JvmtiWeakTable<T>::GetTaggedObjects(jvmtiEnv* jvmti_env,
                                               jint tag_count,
                                               const T* tags,
                                               jint* count_ptr,
                                               jobject** object_result_ptr,
                                               T** tag_result_ptr) {
  if (tag_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  // Check the pointer arguments before the tag loop below dereferences |tags|.
  if (tags == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (tag_count > 0) {
    for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
      if (tags[i] == 0) {
        // A tag of zero means "untagged" and is not a valid filter value.
        return ERR(ILLEGAL_ARGUMENT);
      }
    }
  }

  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  art::JNIEnvExt* jni_env = self->GetJniEnv();

  constexpr size_t kDefaultSize = 10;
  size_t initial_object_size;
  size_t initial_tag_size;
  if (tag_count == 0) {
    initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
    initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
  } else {
    initial_object_size = initial_tag_size = kDefaultSize;
  }
  JvmtiAllocator<void> allocator(jvmti_env);
  ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator,
                                                                         initial_object_size);
  ReleasableContainer<T, JvmtiAllocator<T>> selected_tags(allocator, initial_tag_size);

  size_t count = 0;
  for (auto& pair : tagged_objects_) {
    bool select;
    if (tag_count > 0) {
      select = false;
      for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
        if (tags[i] == pair.second) {
          select = true;
          break;
        }
      }
    } else {
      select = true;
    }

    if (select) {
      art::mirror::Object* obj = pair.first.template Read<art::kWithReadBarrier>();
      if (obj != nullptr) {
        count++;
        if (object_result_ptr != nullptr) {
          selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
        }
        if (tag_result_ptr != nullptr) {
          selected_tags.Pushback(pair.second);
        }
      }
    }
  }

  if (object_result_ptr != nullptr) {
    *object_result_ptr = selected_objects.Release();
  }
  if (tag_result_ptr != nullptr) {
    *tag_result_ptr = selected_tags.Release();
  }
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

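// Linear scan for the first live object carrying |tag|; returns null if no
// such object exists.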
template <typename T>
art::mirror::Object* JvmtiWeakTable<T>::Find(T tag) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  for (auto& pair : tagged_objects_) {
    if (tag == pair.second) {
      art::mirror::Object* obj = pair.first.template Read<art::kWithReadBarrier>();
      if (obj != nullptr) {
        return obj;
      }
    }
  }
  return nullptr;
}

}  // namespace openjdkjvmti

#endif  // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_