/art/test/102-concurrent-gc/src/Main.java
    24  public byte[] bytes;                      [field in class Main.ByteContainer]
    37  l[index].bytes = new byte[bufferSize];
    57  byte[] temp = l[a].bytes;
    58  l[a].bytes = l[b].bytes;
    59  l[b].bytes = temp;
/art/runtime/jdwp/jdwp_bits.h
    35  static inline void Append1BE(std::vector<uint8_t>& bytes, uint8_t value) {    [argument]
    36    bytes.push_back(value);
    39  static inline void Append2BE(std::vector<uint8_t>& bytes, uint16_t value) {   [argument]
    40    bytes.push_back(static_cast<uint8_t>(value >> 8));
    41    bytes.push_back(static_cast<uint8_t>(value));
    44  static inline void Append4BE(std::vector<uint8_t>& bytes, uint32_t value) {   [argument]
    45    bytes.push_back(static_cast<uint8_t>(value >> 24));
    46    bytes.push_back(static_cast<uint8_t>(value >> 16));
    47    bytes.push_back(static_cast<uint8_t>(value >> 8));
    48    bytes...                                                                    (line truncated)
    51  Append8BE(std::vector<uint8_t>& bytes, uint64_t value)                        [argument]
    62  AppendUtf16BE(std::vector<uint8_t>& bytes, const uint16_t* chars, size_t char_count)    [argument]
    ... (further matches truncated)
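The Append*BE helpers above build JDWP reply buffers by pushing each value onto a std::vector<uint8_t> most-significant byte first (big-endian, as the wire protocol requires). A minimal standalone sketch of the same packing idea, not the ART header itself; the AppendBE name and the template form are illustrative only:

    #include <cstdint>
    #include <vector>

    // Push an unsigned integer onto 'out' most-significant byte first,
    // mirroring the Append1BE/2BE/4BE/8BE pattern shown above.
    template <typename T>
    void AppendBE(std::vector<uint8_t>& out, T value) {
      for (int shift = static_cast<int>(sizeof(T) - 1) * 8; shift >= 0; shift -= 8) {
        out.push_back(static_cast<uint8_t>(value >> shift));
      }
    }

    // Usage (hypothetical): AppendBE<uint32_t>(packet, thread_id);
    //                       AppendBE<uint16_t>(packet, name_length);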
/art/runtime/jdwp/jdwp_request.cc
    28  Request::Request(const uint8_t* bytes, uint32_t available) : p_(bytes) {    [argument]
    30    end_ = bytes + byte_count_;
    48  CHECK(p_ == end_) << "read too few bytes: " << (end_ - p_);
    50  CHECK(p_ == end_) << "read too many bytes: " << (p_ - end_);
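jdwp_request.cc wraps an incoming command packet behind a read cursor p_ and an end_ pointer, then checks after parsing that exactly the advertised number of bytes was consumed. A hedged sketch of that cursor-plus-bounds pattern; ByteReader and its method names are illustrative, not the ART class:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    class ByteReader {
     public:
      ByteReader(const uint8_t* bytes, size_t count) : p_(bytes), end_(bytes + count) {}

      uint32_t ReadUnsigned32BE() {
        assert(end_ - p_ >= 4 && "read past end of packet");
        uint32_t v = (static_cast<uint32_t>(p_[0]) << 24) |
                     (static_cast<uint32_t>(p_[1]) << 16) |
                     (static_cast<uint32_t>(p_[2]) << 8) |
                      static_cast<uint32_t>(p_[3]);
        p_ += 4;
        return v;
      }

      // Call once parsing is done: every byte must have been consumed, no more, no less.
      void CheckFullyConsumed() const { assert(p_ == end_ && "read too few or too many bytes"); }

     private:
      const uint8_t* p_;
      const uint8_t* end_;
    };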
/art/test/004-NativeAllocations/src/Main.java
    31  private int bytes;                                                      [field in class Main.NativeAllocation]
    33  NativeAllocation(int bytes, boolean testingDeadlock) throws Exception {    [argument]
    34    this.bytes = bytes;
    35    register_native_allocation.invoke(runtime, bytes);
    38    nativeBytes += bytes;
    48    nativeBytes -= bytes;
    50    register_native_free.invoke(runtime, bytes);
/art/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
    100  * (1b) bytes per entry
    121  std::vector<uint8_t>& bytes = *reinterpret_cast<std::vector<uint8_t>*>(context);    [local]
    122  JDWP::Append4BE(bytes, t->GetThreadId());
    123  JDWP::Append1BE(bytes, Dbg::ToJdwpThreadStatus(t->GetState()));
    124  JDWP::Append4BE(bytes, t->GetTid());
    125  JDWP::Append4BE(bytes, utime);
    126  JDWP::Append4BE(bytes, stime);
    127  JDWP::Append1BE(bytes, t->IsDaemon());
    131  std::vector<uint8_t> bytes;    [local]
    140  JDWP::Append1BE(bytes, kThstHeaderLe...    (entry truncated)
/art/runtime/native/libcore_util_CharsetUtils.cc
    34   * We could avoid this by keeping the UTF-8 bytes on the native heap until we're done and only
    112  ScopedByteArrayRO bytes(env, javaBytes);
    113  if (bytes.get() == nullptr) {
    121  const jbyte* src = &bytes[offset];
    132  ScopedByteArrayRO bytes(env, javaBytes);
    133  if (bytes.get() == nullptr) {
    141  const jbyte* src = &bytes[offset];
    149  * Translates the given characters to US-ASCII or ISO-8859-1 bytes, using the fact that
    163  ScopedByteArrayRW bytes(env, javaBytes);
    164  if (bytes...    (entry truncated)
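The comment at line 149 relies on the fact that US-ASCII and ISO-8859-1 are both prefixes of Unicode: a UTF-16 code unit either fits in the target range or gets a replacement character. A rough standalone sketch of that translation, not libcore's actual implementation; EncodeLatinLike is a hypothetical name:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Encode UTF-16 code units as ISO-8859-1 (maxValidChar = 0xff) or
    // US-ASCII (maxValidChar = 0x7f); anything outside the range becomes '?'.
    std::vector<uint8_t> EncodeLatinLike(const uint16_t* chars, size_t count,
                                         uint16_t maxValidChar) {
      std::vector<uint8_t> bytes;
      bytes.reserve(count);
      for (size_t i = 0; i < count; ++i) {
        bytes.push_back(chars[i] <= maxValidChar ? static_cast<uint8_t>(chars[i]) : '?');
      }
      return bytes;
    }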
/art/cmdline/memory_representation.h
    28  // An integral representation of bytes of memory.
    34  static Memory<kDivisor> FromBytes(size_t bytes) {    [argument]
    35    assert(bytes % kDivisor == 0);
    36    return Memory<kDivisor>(bytes);
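memory_representation.h treats a byte count as so many units of kDivisor and refuses values that are not an exact multiple. A small sketch of that wrapper idea under the same assumption; the struct below is simplified and not the real class:

    #include <cassert>
    #include <cstddef>

    template <size_t kDivisor>
    struct Memory {
      // Only exact multiples of kDivisor are representable.
      static Memory FromBytes(size_t bytes) {
        assert(bytes % kDivisor == 0 && "byte count must be a multiple of the divisor");
        return Memory{bytes};
      }
      size_t ToUnits() const { return bytes_ / kDivisor; }
      size_t bytes_;
    };

    // Usage (hypothetical): auto size = Memory<1024>::FromBytes(64 * 1024);  // 64 KiB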
/art/runtime/base/scoped_arena_allocator.h
    66   void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
    68     return AllocValgrind(bytes, kind);
    70     size_t rounded_bytes = RoundUp(bytes, 8);
    75     CurrentStats()->RecordAlloc(bytes, kind);
    83   void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
    118  void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
    120    return arena_stack_->Alloc(bytes, kind);
/art/runtime/base/arena_allocator.h
    85   void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }    [argument]
    101  void RecordAlloc(size_t bytes, ArenaAllocKind kind);
    209  void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
    211    return AllocValgrind(bytes, kind);
    213    bytes = RoundUp(bytes, kAlignment);
    214    if (UNLIKELY(ptr_ + bytes > end_)) {
    216    ObtainNewArenaForAllocation(bytes);
    221    ArenaAllocatorStats::RecordAlloc(bytes, kin...    (entry truncated)
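Both arena allocators above share the same fast path: round the request up to the arena's alignment, bump a cursor, and fall back to a new arena (or the Valgrind path) only when the current one is exhausted. A simplified sketch of that bump-pointer fast path; kAlignment, the Arena struct, and the nullptr fallback are stand-ins, not the ART types:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAlignment = 8;

    // Round x up to a multiple of n (n must be a power of two).
    inline size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    struct Arena {
      uint8_t* ptr_;  // next free byte
      uint8_t* end_;  // one past the last usable byte

      void* Alloc(size_t bytes) {
        bytes = RoundUp(bytes, kAlignment);
        if (ptr_ + bytes > end_) {
          return nullptr;  // the real allocator would obtain a new arena here
        }
        uint8_t* ret = ptr_;
        ptr_ += bytes;
        return ret;
      }
    };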
/art/runtime/base/allocator.h
    77  // Running count of number of bytes used for this kind of allocation. Increased by allocations,
    81  // Largest value of bytes used seen.
    84  // Total number of bytes allocated of this kind.
    89  inline void RegisterAllocation(AllocatorTag tag, size_t bytes) {    [argument]
    90    g_total_bytes_used[tag].FetchAndAddSequentiallyConsistent(bytes);
    91    size_t new_bytes = g_bytes_used[tag].FetchAndAddSequentiallyConsistent(bytes) + bytes;
    97  inline void RegisterFree(AllocatorTag tag, size_t bytes) {    [argument]
    98    g_bytes_used[tag].FetchAndSubSequentiallyConsistent(bytes);
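allocator.h keeps three per-tag counters: the current byte count (incremented on allocation, decremented on free), a high-water mark, and a cumulative total. A sketch of the same bookkeeping written against std::atomic; the g_* names mirror the listing but the code below is an independent illustration, and kNumTags is invented:

    #include <atomic>
    #include <cstddef>

    constexpr size_t kNumTags = 8;  // illustrative; ART indexes by its AllocatorTag enum

    std::atomic<size_t> g_bytes_used[kNumTags];       // current bytes per tag
    std::atomic<size_t> g_max_bytes_used[kNumTags];   // peak bytes per tag
    std::atomic<size_t> g_total_bytes_used[kNumTags]; // cumulative bytes per tag

    inline void RegisterAllocation(size_t tag, size_t bytes) {
      g_total_bytes_used[tag].fetch_add(bytes);
      size_t new_bytes = g_bytes_used[tag].fetch_add(bytes) + bytes;
      // Maintain the high-water mark; the CAS loop handles concurrent updaters.
      size_t prev = g_max_bytes_used[tag].load();
      while (new_bytes > prev &&
             !g_max_bytes_used[tag].compare_exchange_weak(prev, new_bytes)) {
      }
    }

    inline void RegisterFree(size_t tag, size_t bytes) {
      g_bytes_used[tag].fetch_sub(bytes);
    }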
/art/runtime/base/scoped_arena_allocator.cc
    84   // Update how many bytes we have allocated into the arena so that the arena pool knows how
    94   void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) {    [argument]
    95     size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 8);
    101    CurrentStats()->RecordAlloc(bytes, kind);
    103    VALGRIND_MAKE_MEM_UNDEFINED(ptr, bytes);
    104    VALGRIND_MAKE_MEM_NOACCESS(ptr + bytes, rounded_bytes - bytes);
/art/runtime/base/arena_allocator.cc
    76   void ArenaAllocatorStatsImpl<kCount>::RecordAlloc(size_t bytes, ArenaAllocKind kind) {    [argument]
    77     alloc_stats_[kind] += bytes;
    263  // Update how many bytes we have allocated into the arena so that the arena pool knows how
    269  void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {    [argument]
    270    size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 8);
    285    VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes);
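The AllocValgrind variants in the two files above pad each request with a red zone, hand the caller only the requested bytes, and tell Valgrind that the payload is undefined while the red zone is off-limits, so buffer overruns are reported. A hedged sketch of that pattern, assuming <valgrind/memcheck.h> is available; the raw_alloc callback and kRedZoneBytes value are placeholders for the arena's real backing allocation:

    #include <cstddef>
    #include <cstdint>
    #include <valgrind/memcheck.h>

    constexpr size_t kRedZoneBytes = 8;

    uint8_t* AllocWithRedZone(uint8_t* (*raw_alloc)(size_t), size_t bytes) {
      // Request + red zone, rounded up to 8-byte alignment.
      size_t rounded = (bytes + kRedZoneBytes + 7) & ~size_t{7};
      uint8_t* ptr = raw_alloc(rounded);
      if (ptr == nullptr) {
        return nullptr;
      }
      VALGRIND_MAKE_MEM_UNDEFINED(ptr, bytes);                  // caller may write these bytes
      VALGRIND_MAKE_MEM_NOACCESS(ptr + bytes, rounded - bytes); // red zone: any access is an error
      return ptr;
    }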
/art/runtime/arch/x86_64/memcmp16_x86_64.S
    701  jnc L(16bytes)
    706  jnc L(16bytes)
    711  jnc L(16bytes)
    715  jmp L(16bytes)
    720  jmp L(16bytes)
    724  jmp L(16bytes)
    728  jmp L(16bytes)
    732  jmp L(16bytes)
    736  jmp L(16bytes)
    740  jmp L(16bytes)
    ... (further matches truncated)
/art/test/407-arrays/src/Main.java
    32  static void $opt$testReads(boolean[] bools, byte[] bytes, char[] chars, short[] shorts,     [argument]
    38    assertEquals(0, bytes[0]);
    39    assertEquals(0, bytes[index]);
    63  static void $opt$testWrites(boolean[] bools, byte[] bytes, char[] chars, short[] shorts,    [argument]
    71    bytes[0] = -4;
    72    assertEquals(-4, bytes[0]);
    73    bytes[index] = -8;
    74    assertEquals(-8, bytes[index]);
/art/runtime/gc/collector/garbage_collector.h
    39   : objects(num_objects), bytes(num_bytes) {}
    42   bytes += other.bytes;
    46   // Freed bytes are signed since the GC can free negative bytes if it promotes objects to a space
    48   int64_t bytes;    [member in struct art::gc::collector::ObjectBytePair]
    68   return freed_.bytes;
    71   return freed_los_.bytes;
    157  // Returns the estimated throughput in bytes / second.
/art/runtime/gc/collector/garbage_collector.cc
    58   return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
    92   // Update cumulative statistics with how many bytes the GC iteration freed.
    180  heap_->RecordFree(freed.objects, freed.bytes);
    184  heap_->RecordFree(freed.objects, freed.bytes);
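Line 58 above estimates GC throughput as freed bytes per second: the freed byte count is scaled by 1000 and divided by the collection's duration in milliseconds, with +1 guarding against a zero-millisecond duration. A small sketch of that arithmetic; the function name and the standalone form are illustrative:

    #include <cstdint>

    // Bytes freed per second, guarding against a zero-millisecond duration.
    uint64_t EstimateThroughput(uint64_t freed_bytes, uint64_t duration_ns) {
      uint64_t duration_ms = duration_ns / 1000000;
      return (freed_bytes * 1000) / (duration_ms + 1);
    }

    // Example: freeing 64 MiB in 40 ms reports roughly 1.6 GB/s.
    // (Note that ART's freed byte count is signed, since promotion can make it negative.)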
/art/runtime/gc/space/region_space-inl.h
    158  uint64_t bytes = 0;    [local]
    167  bytes += r->BytesAllocated();
    171  bytes += r->BytesAllocated();
    176  bytes += r->BytesAllocated();
    181  bytes += r->BytesAllocated();
    188  return bytes;
    193  uint64_t bytes = 0;    [local]
    202  bytes += r->ObjectsAllocated();
    206  bytes += r->ObjectsAllocated();
    211  bytes...    (entry truncated)
/art/runtime/gc/space/bump_pointer_space.h
    136  bool AllocNewTlab(Thread* self, size_t bytes);
    148  // Record objects / bytes freed.
    149  void RecordFree(int32_t objects, int32_t bytes) {    [argument]
    151    bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
    163  // Allocate a raw block of bytes.
    164  uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
    185  size_t size_;  // Size of the block in bytes, does not include the header.
/art/runtime/gc/space/bump_pointer_space.cc
    140  uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {    [argument]
    141    bytes = RoundUp(bytes, kAlignment);
    146    AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
    149    header->size_ = bytes;  // Write out the block header.
    255  bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {    [argument]
    258    uint8_t* start = AllocBlock(bytes);
    262    self->SetTlab(start, start + bytes);
    270    << max_contiguous_allocation << " bytes)";
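AllocBlock above reserves the requested bytes plus room for a small header, records the payload size in the header, and returns the bytes just past it; AllocNewTlab then points the thread's TLAB at that payload. A rough sketch of the header-plus-payload layout; the BlockHeader fields and the bump_alloc callback are simplified stand-ins:

    #include <cstddef>
    #include <cstdint>

    struct BlockHeader {
      size_t size_;  // payload size in bytes, not counting this header
    };

    // 'bump_alloc' stands in for the space's unaccounted bump-pointer allocation.
    uint8_t* AllocBlock(uint8_t* (*bump_alloc)(size_t), size_t bytes) {
      bytes = (bytes + 7) & ~size_t{7};  // round payload up to 8-byte alignment
      uint8_t* storage = bump_alloc(bytes + sizeof(BlockHeader));
      if (storage == nullptr) {
        return nullptr;
      }
      reinterpret_cast<BlockHeader*>(storage)->size_ = bytes;  // write the block header
      return storage + sizeof(BlockHeader);                    // caller sees only the payload
    }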
/art/runtime/arch/x86/memcmp16_x86.S
    851  je L(8bytes)
    853  je L(10bytes)
    855  je L(12bytes)
    856  jmp L(14bytes)
    863  je L(16bytes)
    865  je L(18bytes)
    867  je L(20bytes)
    868  jmp L(22bytes)
    875  je L(24bytes)
    877  je L(26bytes)
    ... (further matches truncated)
/art/tools/stream-trace-converter.py
    39   bytes = [ (val & 0xFF), ((val >> 8) & 0xFF) ]
    40   asbytearray = bytearray(bytes)
    59   bytes = [ (val & 0xFF), ((val >> 8) & 0xFF), ((val >> 16) & 0xFF), ((val >> 24) & 0xFF) ]
    60   asbytearray = bytearray(bytes)
    111  # Skip over offsetToData bytes
/art/test/003-omnibus-opcodes/src/Array.java
    25  static void checkBytes(byte[] bytes) {    [argument]
    26    Main.assertTrue(bytes[0] == 0);
    27    Main.assertTrue(bytes[1] == -1);
    28    Main.assertTrue(bytes[2] == -2);
    29    Main.assertTrue(bytes[3] == -3);
    30    Main.assertTrue(bytes[4] == -4);
/art/test/093-serialization/src/Main.java
    52  byte[] bytes = byteStream.toByteArray();
    56  return bytes;
/art/runtime/dex_file_verifier_test.cc
    65   // the final = symbols are read and used to trim the remaining bytes
    170  static std::unique_ptr<const DexFile> FixChecksumAndOpen(uint8_t* bytes, size_t length,    [argument]
    174    CHECK(bytes != nullptr);
    177    FixUpChecksum(bytes);
    182    if (!file->WriteFully(bytes, length)) {
/art/runtime/gc/accounting/space_bitmap.h
    47   // heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
    158  // Size in bytes of the memory that the bitmap spans.
    163  void SetHeapSize(size_t bytes) {    [argument]
    165    bitmap_size_ = OffsetToIndex(bytes) * sizeof(intptr_t);
    166    CHECK_EQ(HeapSize(), bytes);
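SetHeapSize above converts a heap byte count into a bitmap size: each kAlignment-sized chunk of the heap maps to one bit, and bits are grouped into word-sized entries, with OffsetToIndex doing the offset-to-word translation. A hedged sketch of that mapping, assuming 8-byte alignment and pointer-sized bitmap words; the names below follow the listing but the code is an independent illustration:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAlignment = 8;                        // one bit covers 8 heap bytes
    constexpr size_t kBitsPerWord = sizeof(uintptr_t) * 8;  // 64 on a 64-bit build

    // Which bitmap word covers the given byte offset into the heap.
    constexpr size_t OffsetToIndex(size_t offset) {
      return offset / (kAlignment * kBitsPerWord);
    }

    // Bytes of bitmap storage needed to cover 'heap_bytes' of heap.
    // Mirrors bitmap_size_ = OffsetToIndex(bytes) * sizeof(intptr_t) from the listing,
    // which assumes heap_bytes is a multiple of the per-word coverage (512 bytes here).
    constexpr size_t BitmapSize(size_t heap_bytes) {
      return OffsetToIndex(heap_bytes) * sizeof(uintptr_t);
    }

    // Example: a 64 MiB heap at 8-byte granularity needs 8 Mi bits, i.e. 1 MiB of bitmap.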