/art/compiler/dex/quick/
dex_file_to_method_inliner_map.cc:
    44  auto it = inliners_.find(dex_file);
    45  if (it != inliners_.end()) {
    46    return it->second;
    50  // We need to acquire our lock_ to modify inliners_ but we want to release it
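The lookup above is the fast path of a common "check the cache, then create
under the lock" shape. A minimal standalone sketch of that pattern (the
Cache and Compute names are illustrative, not ART's):

    #include <map>
    #include <mutex>
    #include <string>

    // Look the key up first; build and insert the value only on a miss.
    class Cache {
     public:
      const std::string& Get(int key) {
        std::lock_guard<std::mutex> guard(lock_);
        auto it = entries_.find(key);
        if (it != entries_.end()) {
          return it->second;  // Hit: reuse the cached entry.
        }
        // Miss: compute once and remember the result for next time.
        return entries_.emplace(key, Compute(key)).first->second;
      }

     private:
      static std::string Compute(int key) { return std::to_string(key); }
      std::mutex lock_;
      std::map<int, std::string> entries_;
    };

The real code goes further, as the comment at line 50 hints: it releases
lock_ while the inliner is constructed. The sketch keeps the lock held for
brevity.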
dex_file_method_inliner.cc:
    415  auto it = inline_methods_.find(method_index);
    416  bool res = (it != inline_methods_.end() && (it->second.flags & kInlineIntrinsic) != 0);
    418  *intrinsic = it->second;
    427  auto it = inline_methods_.find(info->index);
    428  if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
    431  intrinsic = it->second;
    512  auto it = inline_methods_.find(method_index);
    513  return it !
    520  auto it = inline_methods_.find(method_idx);
    534  auto it = inline_methods_.find(method_idx);
    [all...]
/art/runtime/gc/accounting/
heap_bitmap.cc:
    28  auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(),
    30  CHECK(it != continuous_space_bitmaps_.end()) << " continuous space bitmap " << old_bitmap
    32  *it = new_bitmap;
    37  auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), old_bitmap);
    38  CHECK(it != large_object_bitmaps_.end()) << " large object bitmap " << old_bitmap
    40  *it = new_bitmap;
    57  auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(), bitmap);
    58  DCHECK(it != continuous_space_bitmaps_.end());
    59  continuous_space_bitmaps_.erase(it);
    69  auto it
    [all...]
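Both helpers quoted here follow the same flat-vector idiom: std::find
locates the element, a CHECK asserts it exists, then it is replaced or
erased in place. A generic sketch, using assert in place of ART's CHECK:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Swap one element for another; the caller guarantees it is present.
    template <typename T>
    void ReplaceElement(std::vector<T>& v, const T& old_value, const T& new_value) {
      auto it = std::find(v.begin(), v.end(), old_value);
      assert(it != v.end() && "old_value must be in the vector");
      *it = new_value;
    }

    // Remove one element; erase(it) preserves the order of the rest.
    template <typename T>
    void RemoveElement(std::vector<T>& v, const T& value) {
      auto it = std::find(v.begin(), v.end(), value);
      assert(it != v.end());
      v.erase(it);
    }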
/art/compiler/dex/
verification_results.cc:
    65  auto it = verified_methods_.find(ref);
    66  if (it != verified_methods_.end()) {
    67    // TODO: Investigate why we are doing the work again for this method and try to avoid it.
    70    DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
    71    DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
    72    DCHECK_EQ(it->second->GetDexGcMap().size(), verified_method->GetDexGcMap().size());
    73    delete it->second;
    74    verified_methods_.erase(it);
    83  auto it = verified_methods_.find(ref);
    84  return (it !
    [all...]
global_value_numbering.cc:
    163  auto it = field_index_map_.PutBefore(lb, key, id);
    164  field_index_reverse_map_.push_back(&*it);
    177  auto it = array_location_map_.PutBefore(lb, key, location);
    178  array_location_reverse_map_.push_back(&*it);
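PutBefore() is ART's SafeMap insert-with-hint: the lb argument is a
precomputed lower_bound position, so the tree is probed only once for the
existence test and the insertion. With std::map the same single-probe idiom
looks roughly like this (types are illustrative):

    #include <cstdint>
    #include <map>

    // One lower_bound() serves both the "already there?" test and, on a
    // miss, the hint for an O(1)-amortized insertion at that position.
    uint16_t GetOrAssignId(std::map<uint64_t, uint16_t>& index_map, uint64_t key) {
      auto lb = index_map.lower_bound(key);
      if (lb != index_map.end() && lb->first == key) {
        return lb->second;  // Existing entry.
      }
      uint16_t id = static_cast<uint16_t>(index_map.size());
      index_map.emplace_hint(lb, key, id);
      return id;
    }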
mir_analysis.cc:
    1055  * If huge, assume we won't compile, but allow further analysis to turn it back on.
    1065  /* If it's large and contains no branches, it's likely to be machine-generated initialization */
    1128  // Get field index and try to find it among existing indexes. If found, it's usually among
    1130  // is a linear search, it actually performs much better than a map-based approach.
    1259  // and increment it as needed instead of making O(log n) lookups.
    1270  auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
    1271  mir->meta.method_lowering_info = it->lowering_info_index;
    1273  sequential_entries[it
    [all...]
verified_method.cc:
    72  auto it = devirt_map_.find(dex_pc);
    73  return (it != devirt_map_.end()) ? &it->second : nullptr;
    238  // If the method is not found in the cache this means that it was never found
    294  // an Object[] can have any type of object stored in it, but it may also be assigned a
global_value_numbering.h:
    104  // except that it doesn't add an entry to the global value map if it's not there.
    106  ValueMap::const_iterator it = global_value_map_.find(key);
    107  return (it != global_value_map_.end() && it->second == value);
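The helper quoted above deliberately uses find() on a const map: unlike
operator[], it reports a missing key without inserting a default-constructed
entry. A minimal free-standing equivalent:

    #include <cstdint>
    #include <map>

    // True iff key is present and maps to value; never mutates the map
    // (operator[] would insert a default-constructed entry on a miss).
    bool HasValue(const std::map<uint64_t, uint16_t>& global_value_map,
                  uint64_t key, uint16_t value) {
      auto it = global_value_map.find(key);
      return it != global_value_map.end() && it->second == value;
    }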
local_value_numbering.h:
    60  auto it = sreg_value_map_.find(s_reg);
    61  if (it != sreg_value_map_.end()) {
    62    return it->second == value_name;
    184  // for given base and type and makes it easy to prune unnecessary entries when merging
    225  // where it was stored. We also keep track of all values known for the current write state
    243  // anything that differs from the written value is removed as it may be overwritten.
/art/compiler/utils/
dedupe_set.h:
    30  // Add method. The data structure is thread-safe through the use of internal locks; it also
    54  auto it = keys_[shard_bin].find(hashed_key);
    55  if (it != keys_[shard_bin].end()) {
    56    return it->second;
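The shard_bin subscript is the interesting part: dedupe_set.h picks a shard
from the key's hash so each shard can carry its own lock, and only then runs
the find(). A stripped-down sketch of that sharding (the shard count, key
type, and atomic id counter are illustrative choices, not ART's):

    #include <atomic>
    #include <cstddef>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    // The hash selects a shard; only that shard's lock is taken, so
    // lookups that land in different shards never contend.
    class ShardedDedupe {
     public:
      static constexpr std::size_t kShards = 8;

      std::size_t GetOrAdd(const std::string& key) {
        std::size_t shard = std::hash<std::string>()(key) % kShards;
        std::lock_guard<std::mutex> guard(locks_[shard]);
        auto it = keys_[shard].find(key);
        if (it != keys_[shard].end()) {
          return it->second;  // Seen before: return the existing id.
        }
        std::size_t id = next_id_.fetch_add(1);
        keys_[shard].emplace(key, id);
        return id;
      }

     private:
      std::mutex locks_[kShards];
      std::unordered_map<std::string, std::size_t> keys_[kShards];
      std::atomic<std::size_t> next_id_{0};
    };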
/art/compiler/dex/quick/arm/
fp_arm.cc:
    311  LIR* it = OpIT((default_result == -1) ? kCondGt : kCondMi, "");
    314  OpEndIT(it);
    316  it = OpIT(kCondEq, "");
    318  OpEndIT(it);
    (In these hits "it" is not an iterator: it is the LIR for a Thumb-2 IT
    instruction returned by OpIT(), which predicates the instruction(s)
    that follow; OpEndIT() closes the block.)
call_arm.cc:
    51  // Add the table to the list - we'll process it later
    83  LIR* it = OpIT(kCondEq, "");
    85  OpEndIT(it);
    99  // Add the table to the list - we'll process it later
    152  // Add the table to the list - we'll process it later
    228  LIR* it = OpIT(kCondEq, "");
    231  OpEndIT(it);
    233  it = OpIT(kCondNe, "T");
    239  OpEndIT(it);
    298  LIR* it
    [all...]
/art/runtime/jdwp/
object_registry.cc:
    68  // This object isn't in the registry yet, so add it.
    99  for (auto it = object_to_entry_.lower_bound(identity_hash_code), end = object_to_entry_.end();
    100       it != end && it->first == identity_hash_code; ++it) {
    101    ObjectRegistryEntry* entry = it->second;
    135  auto it = id_to_entry_.find(id);
    136  if (it == id_to_entry_.end()) {
    139  ObjectRegistryEntry& entry = *it->second;
    149  auto it
    158  auto it = id_to_entry_.find(id);
    166  auto it = id_to_entry_.find(id);
    196  auto it = id_to_entry_.find(id);
    210  auto it = id_to_entry_.find(id);
    [all...]
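The loop at lines 99-101 scans every entry whose key equals a given identity
hash code: start at lower_bound() and stop as soon as the key changes. On a
std::multimap the same scan looks like this (the value type is illustrative):

    #include <cstdint>
    #include <iostream>
    #include <map>

    // Visit all entries sharing one key: lower_bound() finds the first,
    // and the loop ends when the key changes (equal_range() in spirit).
    void DumpEntriesFor(const std::multimap<int32_t, const char*>& table,
                        int32_t hash_code) {
      for (auto it = table.lower_bound(hash_code), end = table.end();
           it != end && it->first == hash_code; ++it) {
        std::cout << it->second << '\n';
      }
    }

    int main() {
      std::multimap<int32_t, const char*> table = {
          {7, "first"}, {7, "second"}, {9, "other"}};
      DumpEntriesFor(table, 7);  // Prints "first" then "second".
    }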
/art/runtime/verifier/
register_line.h:
    83  // The register index was validated during the static pass, so we don't need to check it here.
    150  * The "this" argument to <init> uses code offset kUninitThisArgAddr, which puts it at the start
    151  * of the list in slot 0. If we see a register with an uninitialized slot 0 reference, we know it
    170  * caller can decide whether it needs the reference to be initialized or not. (Can also return
    296  auto it = reg_to_lock_depths_.find(src);
    297  if (it != reg_to_lock_depths_.end()) {
    298    reg_to_lock_depths_.Put(dst, it->second);
    303  auto it = reg_to_lock_depths_.find(reg);
    304  if (it != reg_to_lock_depths_.end()) {
    305    return (it
    314  auto it = reg_to_lock_depths_.find(reg);
    325  auto it = reg_to_lock_depths_.find(reg);
    [all...]
reg_type.cc:
    398  auto it = types.begin();
    399  result << reg_type_cache_->GetFromId(*it).Dump();
    400  for (++it; it != types.end(); ++it) {
    402    result << reg_type_cache_->GetFromId(*it).Dump();
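This Dump() loop is the classic trailing-separator fix: print the first
element before the loop, then prefix each remaining element with the
separator. Generic form, with an added empty-set guard that the original
(which knows its set is non-empty) can skip:

    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>

    // Join with ", " and no trailing separator: emit the first element
    // outside the loop, then prefix every later one.
    std::string Dump(const std::set<std::string>& types) {
      std::ostringstream result;
      auto it = types.begin();
      if (it != types.end()) {
        result << *it;
        for (++it; it != types.end(); ++it) {
          result << ", " << *it;
        }
      }
      return result.str();
    }

    int main() { std::cout << Dump({"int", "float", "ref"}) << '\n'; }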
/art/compiler/
elf_writer_mclinker.cc:
    112  // TODO: LinkerTest uses mcld::Initialize(), but it does an
    114  // want mcld::InitializeNative, but it doesn't exist yet, so we
    244  DexMethodIterator it(dex_files);
    245  while (it.HasNext()) {
    246    const DexFile& dex_file = it.GetDexFile();
    247    uint32_t method_idx = it.GetMemberIndex();
    253    it.Next();
    260  // it. This can happen for reused code such as invoke stubs.
    262  SafeMap<const std::string*, const std::string*>::iterator it = added_symbols_.find(&symbol);
    263  if (it !
    392  SafeMap<const std::string*, uint32_t>::iterator it = symbol_to_compiled_code_offset_.find(&symbol);
    [all...]
/art/runtime/base/
timing_logger.cc:
    97  auto it = histograms_.find(&dummy);
    98  if (it == histograms_.end()) {
    104  histogram = *it;
    123  // We don't expect DumpHistogram to be called often, so it is not performance critical.
/art/runtime/
fault_handler.cc:
    31  // Typically a signal handler should not need to deal with signals that occur within it.
    35  // that it may not work. If the cause of the original SIGSEGV is a corrupted stack or other
    50  // a. it completes successfully
    51  // b. it crashes and a signal is raised.
    58  // and write something to the log to tell the user that it happened.
    141  // If malloc calls abort, it will be holding its lock.
    142  // If the handler tries to call malloc, it will deadlock.
    155  // We have handled a signal so it's time to return from the
    165  // if it is.
    188  auto it
    [all...]
intern_table.cc:
    74  auto it = strong_interns_.find(GcRoot<mirror::String>(old_ref));
    75  DCHECK(it != strong_interns_.end());
    76  strong_interns_.erase(it);
    104  auto it = table->find(GcRoot<mirror::String>(s));
    105  if (LIKELY(it != table->end())) {
    106    return const_cast<GcRoot<mirror::String>&>(*it).Read<kWithReadBarrier>();
    145  auto it = table->find(GcRoot<mirror::String>(s));
    146  DCHECK(it != table->end());
    147  table->erase(it);
    294  for (auto it
    [all...]
monitor_test.cc:
    93  // Allocate simple objects till it fails.
    104  // This test is potentially racy, but the timeout is long enough that it should work.
    119  LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false);  // it to thinLocked.
    237  // Give it some more time to get to the exception code.
    319  auto it = handles.begin();
    322  for ( ; it != end; ++it) {
    323    it->Assign(nullptr);
    375  // after which it will interrupt the create task and then wait another 10ms.
safe_map.h:
    70  iterator erase(iterator it) { return map_.erase(it); }
    83  const_iterator it = map_.find(k);
    84  DCHECK(it != map_.end());
    85  return it->second;
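The find/DCHECK/return sequence at lines 83-85 is SafeMap's Get(): it
refuses the silent default-insertion of operator[], treating a missing key
as a bug rather than a request for a new entry. A free-standing sketch over
std::map:

    #include <cassert>
    #include <map>

    // Unlike std::map::operator[], Get() never inserts; a missing key is
    // a programming error, caught by the assertion.
    template <typename K, typename V>
    const V& Get(const std::map<K, V>& m, const K& k) {
      auto it = m.find(k);
      assert(it != m.end() && "Get() requires the key to be present");
      return it->second;
    }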
transaction.cc:
    42  for (auto it : object_logs_) {
    43    field_values_count += it.second.Size();
    47  for (auto it : array_logs_) {
    48    array_values_count += it.second.Size();
    133  for (auto it : object_logs_) {
    134    it.second.Undo(it.first);
    142  for (auto it : array_logs_) {
    143    it.second.Undo(it
    227  auto it = field_values_.find(offset.Uint32Value());
    238  auto it = field_values_.find(offset.Uint32Value());
    249  auto it = field_values_.find(offset.Uint32Value());
    371  auto it = array_values_.find(index);
    [all...]
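A naming trap in the transaction.cc loops: a range-based for binds "it" to
each key/value pair, not to an iterator, which is why the bodies say
it.first and it.second rather than it->first. Note also that for (auto it :
...) copies every pair; binding by const reference avoids that. A small
demonstration (the map contents are illustrative):

    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <vector>

    int main() {
      std::map<int, std::vector<int>> logs = {{1, {10, 20}}, {2, {30}}};

      // "it" is a pair<const int, vector<int>> here, not an iterator;
      // the const reference skips a copy of each entry.
      std::size_t count = 0;
      for (const auto& it : logs) {
        count += it.second.size();  // it.first = key, it.second = value.
      }
      std::cout << count << '\n';  // Prints 3.
    }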
/art/compiler/optimizing/
code_generator.cc:
    54  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    55    HInstruction* current = it.Current();
    82  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    83    HInstruction* current = it.Current();
    347  auto it = table.PcToDexBegin();
    351  CHECK_EQ(pc_info.native_pc, it
    [all...]
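HInstructionIterator is a Java-style iterator: the loop header tests Done()
and calls Advance() instead of comparing against end(). A toy iterator with
the same surface (an illustration of the style, not ART's class):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Java-style iteration: Done()/Current()/Advance() instead of the
    // STL's begin()/end() pair.
    class IntIterator {
     public:
      explicit IntIterator(const std::vector<int>& v) : v_(v) {}
      bool Done() const { return index_ == v_.size(); }
      int Current() const { return v_[index_]; }
      void Advance() { ++index_; }

     private:
      const std::vector<int>& v_;
      std::size_t index_ = 0;
    };

    int main() {
      std::vector<int> values = {1, 2, 3};
      for (IntIterator it(values); !it.Done(); it.Advance()) {
        std::cout << it.Current() << '\n';
      }
    }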
/art/compiler/dex/quick/mips/
int_mips.cc:
    390  void MipsMir2Lir::OpEndIT(LIR* it) {
/art/runtime/gc/space/
large_object_space.cc:
    177  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    178    MemMap* mem_map = it->second;
    209  // Updates the allocation size and whether or not it is free.
    225  // where it is. This is only used for coalescing so we only need to be able to do it if the
    251  // contains the size of the previous free block preceding it. Implemented in such a way that we
    333  auto it = free_blocks_.lower_bound(info);
    334  CHECK(it !
    418  auto it = free_blocks_.lower_bound(&temp_info);
    [all...]
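The free_blocks_.lower_bound() calls locate the first free block at or after
a given position; together with that block's predecessor, these are the two
candidates for coalescing. A sketch over a map keyed by block start address
(this address/size representation is an assumption for illustration; ART's
free-list space encodes block metadata differently):

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <map>

    using FreeBlocks = std::map<uintptr_t, std::size_t>;  // start address -> size

    // True if a block [addr, addr + size) touches a free neighbor on
    // either side: lower_bound() yields the successor, std::prev() the
    // predecessor.
    bool TouchesFreeNeighbor(const FreeBlocks& free_blocks,
                             uintptr_t addr, std::size_t size) {
      auto it = free_blocks.lower_bound(addr);
      bool next_adjacent = it != free_blocks.end() && it->first == addr + size;
      bool prev_adjacent = false;
      if (it != free_blocks.begin()) {
        auto prev = std::prev(it);
        prev_adjacent = prev->first + prev->second == addr;
      }
      return prev_adjacent || next_adjacent;
    }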