MachVMMemory.cpp revision 08f60c88b61c42c35abf3233f0cbe19d29fbe814
//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

// Return the inferior task's VM page size in bytes, caching the result in
// m_page_size on first use. Prefers the per-task TASK_VM_INFO query (when the
// SDK provides it) and falls back to host_page_size(). Returns 0 if neither
// query succeeds.
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        // Fall back to the host's page size when the task-specific query is
        // unavailable or failed.
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

// Clamp "count" so that a transfer starting at "addr" does not cross a page
// boundary. Returns the (possibly reduced) byte count; "count" is returned
// unchanged if the page size could not be determined.
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

// Fill in "region_info" for the VM region containing "address". Always
// returns true: when "address" is not in any mapped region, permissions are
// cleared and "size" describes the unmapped gap up to the next valid region
// (or 1 when even that can't be determined).
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task which means the info describes the next valid
            // region from which we can infer the size of this invalid region
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just fill
        // 1 in as the byte size
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writeable or executable
        region_info->permissions = 0;
    }
    return true;
}

// For integrated graphics chip, this makes the accounting info for 'wired' memory more like top.
// Query the machdep.memmap.{Reserved,Unusable,Other} sysctls (all QUAD /
// uint64_t in the kernel), sum them, round down to a 128 MB boundary, and
// convert to pages. The result is computed once and cached; returns 0 when
// the sysctls are unavailable or the stolen total is below 128 MB.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    // mib_reserved_len doubles as the "MIBs resolved" flag: any failure below
    // resets it to 0 so the whole name->MIB translation is retried next call.
    if(0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if(-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                        &reserved_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                        &unusable_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_other, mib_other_len, &other,
                        &other_len, NULL, 0))
        {
            return 0;
        }

        // Only trust the values if the kernel wrote exactly a uint64_t each.
        if(reserved_len == sizeof(reserved)
           && unusable_len == sizeof(unusable)
           && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if(stolen >= mb128)
            {
                stolen = (stolen & ~(mb128 - 1)); // rounding down
                // PageSize() can return 0 when the page size could not be
                // determined; guard against dividing by zero in that case.
                const nub_size_t page_size = PageSize (task);
                if (page_size > 0)
                    stolenPages = stolen / page_size;
            }
        }
    }

    calculated = true;
    return stolenPages;
}

// Return the machine's physical memory size in bytes (hw.memsize), cached
// after the first successful query. Returns 0 if the sysctl fails.
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    // BUG FIX: "calculated" was never set, so the cache above was dead and
    // the sysctl re-ran on every call. Cache only on success so a transient
    // failure can be retried; the sysctl result was previously unchecked.
    if (0 == sysctl(mib, 2, &physical_memory, &len, NULL, 0))
        calculated = true;
    return physical_memory;
}

// rsize and dirty_size is not adjusted for dyld shared cache and multiple __LINKEDIT segment, as in vmmap. In practice, dirty_size doesn't differ much but rsize may. There is performance penalty for the adjustment. Right now, only use the dirty_size.
// Walk the task's VM map with mach_vm_region_recurse() and total the resident
// and dirtied page counts, returning them as byte sizes in "rsize" and
// "dirty_size". Submaps are descended into (they don't advance the address);
// malloc stack-logging regions are excluded from the totals.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            // Recurse into the submap: bump the depth and re-query the same
            // address; the submap container itself is not counted.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    // Page size is computed once for the first task queried and reused for
    // all later calls. NOTE(review): assumes every task this is called for
    // shares the same page size — confirm if mixed-page-size tasks matter.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}

// Test whether the virtual address is within the architecture's shared region.
// Return true if "addr" lies inside the dyld shared region for the given CPU
// architecture. Unknown cputypes leave base == size == 0 and therefore
// return false.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch(type) {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log error about unknown CPU type
            break;
        }
    }


    return(addr >= base && addr < (base + size));
}

// Walk the task's VM map with VM_REGION_TOP_INFO and accumulate the resident
// private ("rprvt") and virtual private ("vprvt") byte totals, using top-style
// accounting. "empty", "fw_private", and "aliased" are collected but not
// currently reported. pid == 0 selects special kernel_task accounting.
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;

    // Page size is computed once for the first task queried and reused for
    // all later calls. NOTE(review): assumes all inspected tasks share the
    // same page size.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    // mach_vm_region() rounds "addr" up to the next region, so "addr += size"
    // plus the kernel's snapping advances us region by region.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

// Gather the memory statistics requested by "scanType" into the output
// references: host physical memory, host VM statistics (with stolen pages
// folded into the wire count), the task's private/resident/virtual sizes,
// and optionally the vmmap-style dirty-page size. Always returns true.
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
    }

    return true;
}

// Read up to "data_count" bytes from "address" in the task into "data",
// splitting the transfer at page boundaries. Returns the number of bytes
// actually read, which may be short if a page is unreadable.
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        // One mach_vm_read per page-bounded chunk; the kernel vends us a
        // buffer that we must copy out of and deallocate.
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = NULL;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}


// Write "data_count" bytes from "data" to "address" in the task, region by
// region. Each region is temporarily made read/write before the write is
// attempted. Returns the number of bytes actually written; stops early if a
// region can't be found, has no bytes remaining, or can't be made writable.
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}


// Write "data_count" bytes into a single (already writable) region, one
// page-bounded mach_vm_write at a time. On non-x86 targets the instruction
// cache is flushed for each chunk. Returns the number of bytes written.
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        // Non-x86 targets need an explicit cache flush so freshly written
        // code (e.g. breakpoint opcodes) is visible to the CPU.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}