MachVMMemory.cpp revision 190276872994426fb0398e1cf521748249b75875
1//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  Created by Greg Clayton on 6/26/07.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MachVMMemory.h"
15#include "MachVMRegion.h"
16#include "DNBLog.h"
17#include <mach/mach_vm.h>
18#include <mach/shared_region.h>
19#include <sys/sysctl.h>
20
21MachVMMemory::MachVMMemory() :
22    m_page_size    (kInvalidPageSize),
23    m_err        (0)
24{
25}
26
// Nothing to release explicitly; members clean up after themselves.
MachVMMemory::~MachVMMemory()
{
}
30
nub_size_t
MachVMMemory::PageSize()
{
    // Lazily query the host's VM page size the first time we are called.
    // On failure, remember zero so we don't keep re-asking the host.
    if (m_page_size != kInvalidPageSize)
        return m_page_size;

    m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
    if (m_err.Fail())
        m_page_size = 0;
    return m_page_size;
}
42
43nub_size_t
44MachVMMemory::MaxBytesLeftInPage(nub_addr_t addr, nub_size_t count)
45{
46    const nub_size_t page_size = PageSize();
47    if (page_size > 0)
48    {
49        nub_size_t page_offset = (addr % page_size);
50        nub_size_t bytes_left_in_page = page_size - page_offset;
51        if (count > bytes_left_in_page)
52            count = bytes_left_in_page;
53    }
54    return count;
55}
56
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        // "address" landed inside a valid region; report it directly.
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
        return true;
    }

    // "address" is not inside any valid region.
    region_info->addr = address;
    region_info->size = 0;
    if (vmRegion.GetError().Success())
    {
        // GetRegionForAddress() returned false, but the query itself
        // succeeded, which means "vmRegion" now describes the next valid
        // region after "address". From its start address we can infer the
        // size of this invalid hole.
        mach_vm_address_t next_region_start = vmRegion.StartAddress();
        if (address < next_region_start)
            region_info->size = next_region_start - address;
    }
    // If we can't get any info about the size from the next region, just
    // fill 1 in as the byte size.
    if (region_info->size == 0)
        region_info->size = 1;

    // Not readable, writeable or executable
    region_info->permissions = 0;
    return true;
}
92
// For integrated graphics chip, this makes the accounting info for 'wired'
// memory more like top. Returns the number of pages "stolen" from the system
// by the graphics hardware, as the sum of the machdep.memmap Reserved,
// Unusable and Other sysctls, rounded down to a 128 MB boundary.
static uint64_t GetStolenPages()
{
    // The answer never changes at runtime, so compute it once and cache it.
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    // MIB arrays are cached so sysctlnametomib() name resolution only
    // happens once. mib_reserved_len == 0 means "not resolved yet" (and is
    // reset on failure so a later call retries).
    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if (0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;
        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);
        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;
        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);
        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;
        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);
        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len = sizeof(reserved);
        size_t unusable_len = sizeof(unusable);
        size_t other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                         &reserved_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                         &unusable_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_other, mib_other_len, &other,
                         &other_len, NULL, 0))
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            const uint64_t mb128 = 128 * 1024 * 1024ULL;

            // Only account for stolen memory when there is at least 128 MB
            // of it, rounded down to a 128 MB boundary.
            if (stolen >= mb128)
            {
                stolen = stolen & ~(mb128 - 1); // rounding down
                stolenPages = stolen / vm_page_size;
            }
        }
    }

    calculated = true;
    return stolenPages;
}
197
// Returns the machine's physical memory size in bytes via the
// hw.memsize sysctl, or 0 if the query fails.
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2] = { CTL_HW, HW_MEMSIZE };
    size_t len = sizeof(physical_memory);
    // Bug fix: "calculated" was never set, so the cached value above was
    // never used and sysctl() ran on every call. Only cache on success so
    // a transient failure can be retried.
    if (sysctl(mib, 2, &physical_memory, &len, NULL, 0) == 0)
        calculated = true;
    return physical_memory;
}
212
// rsize and dirty_size is not adjusted for dyld shared cache and multiple __LINKEDIT segment, as in vmmap. In practice, dirty_size doesn't differ much but rsize may. There is performance penalty for the adjustment. Right now, only use the dirty_size.
//
// Walks every VM region of "task" with mach_vm_region_recurse(), summing
// resident and dirtied page counts, and returns the totals (in bytes) in
// "rsize" and "dirty_size".
static void GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t  count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            // Descend into the submap: bump the nesting depth and re-query
            // the SAME address (do not advance), so the next iteration
            // returns the submap's contents instead of the submap itself.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            // Advance past this region for the next query.
            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    // Convert page counts to byte sizes using the local VM page size.
    rsize = pages_resident * vm_page_size;
    dirty_size = pages_dirtied * vm_page_size;
}
266
// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0;
    mach_vm_address_t size = 0;

    switch (type)
    {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default:
            // Unknown CPU type: base and size stay zero, so the range
            // test below is always false.
            break;
    }

    return addr >= base && addr < (base + size);
}
297
// Computes top-style private resident ("rprvt") and private virtual
// ("vprvt") memory sizes for "task" by walking its regions with
// VM_REGION_TOP_INFO. "pid" 0 selects the kernel_task special case below.
static void GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    mach_vm_size_t pagesize = vm_page_size;
    bool global_shared_text_data_mapped = false;

    // mach_vm_region() rounds "addr" up to the next region, so stepping by
    // the returned size visits every region until KERN_INVALID_ADDRESS.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in.  If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t  b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                // Re-query the same region with BASIC_INFO; its "reserved"
                // flag indicates the globally shared submap is mapped in.
                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    // Only the privately-resident portion of a COW region
                    // counts toward this task's private totals.
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}
395
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    // Fill in only the outputs selected by "scanType"; everything else is
    // left untouched for the caller.
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        // The host port doesn't change, so only look it up once.
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        // Fold pages stolen by integrated graphics into the wired count so
        // our numbers line up with what top reports.
        vm_stats.wire_count += GetStolenPages();

        // Resident/virtual sizes come straight from the task_basic_info
        // that the caller already fetched.
        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
    }

    return true;
}
423
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    // Read "data_count" bytes from "address" in "task" into "data", one
    // page-bounded chunk at a time. Returns the number of bytes actually
    // read, which may be short if a chunk fails.
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        // vm_offset_t is an integral type, so initialize with 0, not the
        // pointer constant NULL.
        vm_offset_t vm_memory = 0;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Fail())
            break;

        if (curr_bytes_read != curr_size)
        {
            if (DNBLogCheckLogBit(LOG_MEMORY))
                m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
        }
        // Copy the kernel-allocated buffer into the caller's buffer, then
        // give the kernel allocation back.
        ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
        ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
        total_bytes_read += curr_bytes_read;
        curr_addr += curr_bytes_read;
        curr_data += curr_bytes_read;

        // Guard against spinning forever if the kernel reports success but
        // hands back zero bytes.
        if (curr_bytes_read == 0)
            break;
    }
    return total_bytes_read;
}
463
464
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    // Write "data_count" bytes from "data" to "address" in "task". The
    // write proceeds region by region: each chunk is clamped to the
    // current VM region, the region is forced read/write, and the chunk is
    // handed to WriteRegion(). Returns the number of bytes actually
    // written, which may be short on failure.
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;

    while (total_bytes_written < data_count)
    {
        if (!vmRegion.GetRegionForAddress(curr_addr))
        {
            // Bug fix: log the address that actually failed (curr_addr),
            // not the original starting address.
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)curr_addr);
            break;
        }

        mach_vm_size_t curr_data_count = data_count - total_bytes_written;
        mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
        if (region_bytes_left == 0)
            break;
        if (curr_data_count > region_bytes_left)
            curr_data_count = region_bytes_left;

        if (!vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
            break;
        }

        nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
        // nub_size_t is unsigned, so the old "<= 0" test could only ever
        // mean "== 0".
        if (bytes_written == 0)
        {
            // Error should have already been posted by WriteRegion...
            break;
        }
        total_bytes_written += bytes_written;
        curr_addr += bytes_written;
        curr_data += bytes_written;
    }

    return total_bytes_written;
}
518
519
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    // Write "data_count" bytes to "address" in "task", one page-bounded
    // chunk at a time, flushing the instruction cache on architectures
    // that need it. Returns the number of bytes actually written.
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

        // Bug fix: bail out now if the write failed. Previously m_err was
        // overwritten by the vm_machine_attribute() call below, so on
        // non-x86 targets a successful cache flush could mask a failed
        // write and the chunk would be counted as written.
        if (m_err.Fail())
            break;

#if !defined (__i386__) && !defined (__x86_64__)
        // Flush the data/instruction caches for the range we just wrote so
        // any modified instructions take effect.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
        if (m_err.Fail())
            break;
#endif

        total_bytes_written += curr_data_count;
        curr_addr += curr_data_count;
        curr_data += curr_data_count;
    }
    return total_bytes_written;
}
557