minidump_generator.cc revision 8fac6df2a0dfbfe7512c3f6616cda4cbac4f0d9d
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 30#include <algorithm> 31#include <cstdio> 32 33#include <mach/host_info.h> 34#include <mach/mach_vm.h> 35#include <mach/vm_statistics.h> 36#include <mach-o/dyld.h> 37#include <mach-o/loader.h> 38#include <sys/sysctl.h> 39#include <sys/resource.h> 40 41#include <CoreFoundation/CoreFoundation.h> 42 43#include "client/mac/handler/minidump_generator.h" 44 45#ifdef HAS_ARM_SUPPORT 46#include <mach/arm/thread_status.h> 47#endif 48#ifdef HAS_PPC_SUPPORT 49#include <mach/ppc/thread_status.h> 50#endif 51#ifdef HAS_X86_SUPPORT 52#include <mach/i386/thread_status.h> 53#endif 54 55#include "client/minidump_file_writer-inl.h" 56#include "common/mac/file_id.h" 57#include "common/mac/macho_id.h" 58#include "common/mac/string_utilities.h" 59 60using MacStringUtils::ConvertToString; 61using MacStringUtils::IntegerValueAtIndex; 62 63namespace google_breakpad { 64 65#if __LP64__ 66#define LC_SEGMENT_ARCH LC_SEGMENT_64 67#else 68#define LC_SEGMENT_ARCH LC_SEGMENT 69#endif 70 71// constructor when generating from within the crashed process 72MinidumpGenerator::MinidumpGenerator() 73 : writer_(), 74 exception_type_(0), 75 exception_code_(0), 76 exception_subcode_(0), 77 exception_thread_(0), 78 crashing_task_(mach_task_self()), 79 handler_thread_(mach_thread_self()), 80 cpu_type_(DynamicImages::GetNativeCPUType()), 81 dynamic_images_(NULL), 82 memory_blocks_(&allocator_) { 83 GatherSystemInformation(); 84} 85 86// constructor when generating from a different process than the 87// crashed process 88MinidumpGenerator::MinidumpGenerator(mach_port_t crashing_task, 89 mach_port_t handler_thread) 90 : writer_(), 91 exception_type_(0), 92 exception_code_(0), 93 exception_subcode_(0), 94 exception_thread_(0), 95 crashing_task_(crashing_task), 96 handler_thread_(handler_thread), 97 cpu_type_(DynamicImages::GetNativeCPUType()), 98 dynamic_images_(NULL), 99 memory_blocks_(&allocator_) { 100 if (crashing_task != mach_task_self()) { 101 dynamic_images_ = new DynamicImages(crashing_task_); 102 
cpu_type_ = dynamic_images_->GetCPUType(); 103 } else { 104 dynamic_images_ = NULL; 105 cpu_type_ = DynamicImages::GetNativeCPUType(); 106 } 107 108 GatherSystemInformation(); 109} 110 111MinidumpGenerator::~MinidumpGenerator() { 112 delete dynamic_images_; 113} 114 115char MinidumpGenerator::build_string_[16]; 116int MinidumpGenerator::os_major_version_ = 0; 117int MinidumpGenerator::os_minor_version_ = 0; 118int MinidumpGenerator::os_build_number_ = 0; 119 120// static 121void MinidumpGenerator::GatherSystemInformation() { 122 // If this is non-zero, then we've already gathered the information 123 if (os_major_version_) 124 return; 125 126 // This code extracts the version and build information from the OS 127 CFStringRef vers_path = 128 CFSTR("/System/Library/CoreServices/SystemVersion.plist"); 129 CFURLRef sys_vers = 130 CFURLCreateWithFileSystemPath(NULL, 131 vers_path, 132 kCFURLPOSIXPathStyle, 133 false); 134 CFDataRef data; 135 SInt32 error; 136 CFURLCreateDataAndPropertiesFromResource(NULL, sys_vers, &data, NULL, NULL, 137 &error); 138 139 if (!data) 140 return; 141 142 CFDictionaryRef list = static_cast<CFDictionaryRef> 143 (CFPropertyListCreateFromXMLData(NULL, data, kCFPropertyListImmutable, 144 NULL)); 145 if (!list) 146 return; 147 148 CFStringRef build_version = static_cast<CFStringRef> 149 (CFDictionaryGetValue(list, CFSTR("ProductBuildVersion"))); 150 CFStringRef product_version = static_cast<CFStringRef> 151 (CFDictionaryGetValue(list, CFSTR("ProductVersion"))); 152 string build_str = ConvertToString(build_version); 153 string product_str = ConvertToString(product_version); 154 155 CFRelease(list); 156 CFRelease(sys_vers); 157 CFRelease(data); 158 159 strlcpy(build_string_, build_str.c_str(), sizeof(build_string_)); 160 161 // Parse the string that looks like "10.4.8" 162 os_major_version_ = IntegerValueAtIndex(product_str, 0); 163 os_minor_version_ = IntegerValueAtIndex(product_str, 1); 164 os_build_number_ = IntegerValueAtIndex(product_str, 2); 
165} 166 167string MinidumpGenerator::UniqueNameInDirectory(const string &dir, 168 string *unique_name) { 169 CFUUIDRef uuid = CFUUIDCreate(NULL); 170 CFStringRef uuid_cfstr = CFUUIDCreateString(NULL, uuid); 171 CFRelease(uuid); 172 string file_name(ConvertToString(uuid_cfstr)); 173 CFRelease(uuid_cfstr); 174 string path(dir); 175 176 // Ensure that the directory (if non-empty) has a trailing slash so that 177 // we can append the file name and have a valid pathname. 178 if (!dir.empty()) { 179 if (dir.at(dir.size() - 1) != '/') 180 path.append(1, '/'); 181 } 182 183 path.append(file_name); 184 path.append(".dmp"); 185 186 if (unique_name) 187 *unique_name = file_name; 188 189 return path; 190} 191 192bool MinidumpGenerator::Write(const char *path) { 193 WriteStreamFN writers[] = { 194 &MinidumpGenerator::WriteThreadListStream, 195 &MinidumpGenerator::WriteMemoryListStream, 196 &MinidumpGenerator::WriteSystemInfoStream, 197 &MinidumpGenerator::WriteModuleListStream, 198 &MinidumpGenerator::WriteMiscInfoStream, 199 &MinidumpGenerator::WriteBreakpadInfoStream, 200 // Exception stream needs to be the last entry in this array as it may 201 // be omitted in the case where the minidump is written without an 202 // exception. 203 &MinidumpGenerator::WriteExceptionStream, 204 }; 205 bool result = false; 206 207 // If opening was successful, create the header, directory, and call each 208 // writer. The destructor for the TypedMDRVAs will cause the data to be 209 // flushed. The destructor for the MinidumpFileWriter will close the file. 
210 if (writer_.Open(path)) { 211 TypedMDRVA<MDRawHeader> header(&writer_); 212 TypedMDRVA<MDRawDirectory> dir(&writer_); 213 214 if (!header.Allocate()) 215 return false; 216 217 int writer_count = static_cast<int>(sizeof(writers) / sizeof(writers[0])); 218 219 // If we don't have exception information, don't write out the 220 // exception stream 221 if (!exception_thread_ && !exception_type_) 222 --writer_count; 223 224 // Add space for all writers 225 if (!dir.AllocateArray(writer_count)) 226 return false; 227 228 MDRawHeader *header_ptr = header.get(); 229 header_ptr->signature = MD_HEADER_SIGNATURE; 230 header_ptr->version = MD_HEADER_VERSION; 231 time(reinterpret_cast<time_t *>(&(header_ptr->time_date_stamp))); 232 header_ptr->stream_count = writer_count; 233 header_ptr->stream_directory_rva = dir.position(); 234 235 MDRawDirectory local_dir; 236 result = true; 237 for (int i = 0; (result) && (i < writer_count); ++i) { 238 result = (this->*writers[i])(&local_dir); 239 240 if (result) 241 dir.CopyIndex(i, &local_dir); 242 } 243 } 244 return result; 245} 246 247size_t MinidumpGenerator::CalculateStackSize(mach_vm_address_t start_addr) { 248 mach_vm_address_t stack_region_base = start_addr; 249 mach_vm_size_t stack_region_size; 250 natural_t nesting_level = 0; 251 vm_region_submap_info_64 submap_info; 252 mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64; 253 254 vm_region_recurse_info_t region_info; 255 region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info); 256 257 if (start_addr == 0) { 258 return 0; 259 } 260 261 kern_return_t result = 262 mach_vm_region_recurse(crashing_task_, &stack_region_base, 263 &stack_region_size, &nesting_level, 264 region_info, &info_count); 265 266 if (result != KERN_SUCCESS || start_addr < stack_region_base) { 267 // Failure or stack corruption, since mach_vm_region had to go 268 // higher in the process address space to find a valid region. 
269 return 0; 270 } 271 272 unsigned int tag = submap_info.user_tag; 273 274 // If the user tag is VM_MEMORY_STACK, look for more readable regions with 275 // the same tag placed immediately above the computed stack region. Under 276 // some circumstances, the stack for thread 0 winds up broken up into 277 // multiple distinct abutting regions. This can happen for several reasons, 278 // including user code that calls setrlimit(RLIMIT_STACK, ...) or changes 279 // the access on stack pages by calling mprotect. 280 if (tag == VM_MEMORY_STACK) { 281 while (true) { 282 mach_vm_address_t next_region_base = stack_region_base + 283 stack_region_size; 284 mach_vm_address_t proposed_next_region_base = next_region_base; 285 mach_vm_size_t next_region_size; 286 nesting_level = 0; 287 mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64; 288 result = mach_vm_region_recurse(crashing_task_, &next_region_base, 289 &next_region_size, &nesting_level, 290 region_info, &info_count); 291 if (result != KERN_SUCCESS || 292 next_region_base != proposed_next_region_base || 293 submap_info.user_tag != tag || 294 (submap_info.protection & VM_PROT_READ) == 0) { 295 break; 296 } 297 298 stack_region_size += next_region_size; 299 } 300 } 301 302 return stack_region_base + stack_region_size - start_addr; 303} 304 305bool MinidumpGenerator::WriteStackFromStartAddress( 306 mach_vm_address_t start_addr, 307 MDMemoryDescriptor *stack_location) { 308 UntypedMDRVA memory(&writer_); 309 310 bool result = false; 311 size_t size = CalculateStackSize(start_addr); 312 313 if (size == 0) { 314 // In some situations the stack address for the thread can come back 0. 315 // In these cases we skip over the threads in question and stuff the 316 // stack with a clearly borked value. 317 start_addr = 0xDEADBEEF; 318 size = 16; 319 if (!memory.Allocate(size)) 320 return false; 321 322 unsigned long long dummy_stack[2]; // Fill dummy stack with 16 bytes of 323 // junk. 
324 dummy_stack[0] = 0xDEADBEEF; 325 dummy_stack[1] = 0xDEADBEEF; 326 327 result = memory.Copy(dummy_stack, size); 328 } else { 329 330 if (!memory.Allocate(size)) 331 return false; 332 333 if (dynamic_images_) { 334 vector<uint8_t> stack_memory; 335 if (ReadTaskMemory(crashing_task_, 336 start_addr, 337 size, 338 stack_memory) != KERN_SUCCESS) { 339 return false; 340 } 341 342 result = memory.Copy(&stack_memory[0], size); 343 } else { 344 result = memory.Copy(reinterpret_cast<const void *>(start_addr), size); 345 } 346 } 347 348 stack_location->start_of_memory_range = start_addr; 349 stack_location->memory = memory.location(); 350 351 return result; 352} 353 354bool MinidumpGenerator::WriteStack(breakpad_thread_state_data_t state, 355 MDMemoryDescriptor *stack_location) { 356 switch (cpu_type_) { 357#ifdef HAS_ARM_SUPPORT 358 case CPU_TYPE_ARM: 359 return WriteStackARM(state, stack_location); 360#endif 361#ifdef HAS_PPC_SUPPORT 362 case CPU_TYPE_POWERPC: 363 return WriteStackPPC(state, stack_location); 364 case CPU_TYPE_POWERPC64: 365 return WriteStackPPC64(state, stack_location); 366#endif 367#ifdef HAS_X86_SUPPORT 368 case CPU_TYPE_I386: 369 return WriteStackX86(state, stack_location); 370 case CPU_TYPE_X86_64: 371 return WriteStackX86_64(state, stack_location); 372#endif 373 default: 374 return false; 375 } 376} 377 378bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state, 379 MDLocationDescriptor *register_location) { 380 switch (cpu_type_) { 381#ifdef HAS_ARM_SUPPORT 382 case CPU_TYPE_ARM: 383 return WriteContextARM(state, register_location); 384#endif 385#ifdef HAS_PPC_SUPPORT 386 case CPU_TYPE_POWERPC: 387 return WriteContextPPC(state, register_location); 388 case CPU_TYPE_POWERPC64: 389 return WriteContextPPC64(state, register_location); 390#endif 391#ifdef HAS_X86_SUPPORT 392 case CPU_TYPE_I386: 393 return WriteContextX86(state, register_location); 394 case CPU_TYPE_X86_64: 395 return WriteContextX86_64(state, register_location); 
396#endif 397 default: 398 return false; 399 } 400} 401 402u_int64_t MinidumpGenerator::CurrentPCForStack( 403 breakpad_thread_state_data_t state) { 404 switch (cpu_type_) { 405#ifdef HAS_ARM_SUPPORT 406 case CPU_TYPE_ARM: 407 return CurrentPCForStackARM(state); 408#endif 409#ifdef HAS_PPC_SUPPORT 410 case CPU_TYPE_POWERPC: 411 return CurrentPCForStackPPC(state); 412 case CPU_TYPE_POWERPC64: 413 return CurrentPCForStackPPC64(state); 414#endif 415#ifdef HAS_X86_SUPPORT 416 case CPU_TYPE_I386: 417 return CurrentPCForStackX86(state); 418 case CPU_TYPE_X86_64: 419 return CurrentPCForStackX86_64(state); 420#endif 421 default: 422 assert("Unknown CPU type!"); 423 return 0; 424 } 425} 426 427#ifdef HAS_ARM_SUPPORT 428bool MinidumpGenerator::WriteStackARM(breakpad_thread_state_data_t state, 429 MDMemoryDescriptor *stack_location) { 430 arm_thread_state_t *machine_state = 431 reinterpret_cast<arm_thread_state_t *>(state); 432 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, sp); 433 return WriteStackFromStartAddress(start_addr, stack_location); 434} 435 436u_int64_t 437MinidumpGenerator::CurrentPCForStackARM(breakpad_thread_state_data_t state) { 438 arm_thread_state_t *machine_state = 439 reinterpret_cast<arm_thread_state_t *>(state); 440 441 return REGISTER_FROM_THREADSTATE(machine_state, pc); 442} 443 444bool MinidumpGenerator::WriteContextARM(breakpad_thread_state_data_t state, 445 MDLocationDescriptor *register_location) 446{ 447 TypedMDRVA<MDRawContextARM> context(&writer_); 448 arm_thread_state_t *machine_state = 449 reinterpret_cast<arm_thread_state_t *>(state); 450 451 if (!context.Allocate()) 452 return false; 453 454 *register_location = context.location(); 455 MDRawContextARM *context_ptr = context.get(); 456 context_ptr->context_flags = MD_CONTEXT_ARM_FULL; 457 458#define AddGPR(a) context_ptr->iregs[a] = REGISTER_FROM_THREADSTATE(machine_state, r[a]) 459 460 context_ptr->iregs[13] = REGISTER_FROM_THREADSTATE(machine_state, sp); 461 
context_ptr->iregs[14] = REGISTER_FROM_THREADSTATE(machine_state, lr); 462 context_ptr->iregs[15] = REGISTER_FROM_THREADSTATE(machine_state, pc); 463 context_ptr->cpsr = REGISTER_FROM_THREADSTATE(machine_state, cpsr); 464 465 AddGPR(0); 466 AddGPR(1); 467 AddGPR(2); 468 AddGPR(3); 469 AddGPR(4); 470 AddGPR(5); 471 AddGPR(6); 472 AddGPR(7); 473 AddGPR(8); 474 AddGPR(9); 475 AddGPR(10); 476 AddGPR(11); 477 AddGPR(12); 478#undef AddReg 479#undef AddGPR 480 481 return true; 482} 483#endif 484 485#ifdef HAS_PCC_SUPPORT 486bool MinidumpGenerator::WriteStackPPC(breakpad_thread_state_data_t state, 487 MDMemoryDescriptor *stack_location) { 488 ppc_thread_state_t *machine_state = 489 reinterpret_cast<ppc_thread_state_t *>(state); 490 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1); 491 return WriteStackFromStartAddress(start_addr, stack_location); 492} 493 494bool MinidumpGenerator::WriteStackPPC64(breakpad_thread_state_data_t state, 495 MDMemoryDescriptor *stack_location) { 496 ppc_thread_state64_t *machine_state = 497 reinterpret_cast<ppc_thread_state64_t *>(state); 498 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1); 499 return WriteStackFromStartAddress(start_addr, stack_location); 500} 501 502u_int64_t 503MinidumpGenerator::CurrentPCForStackPPC(breakpad_thread_state_data_t state) { 504 ppc_thread_state_t *machine_state = 505 reinterpret_cast<ppc_thread_state_t *>(state); 506 507 return REGISTER_FROM_THREADSTATE(machine_state, srr0); 508} 509 510u_int64_t 511MinidumpGenerator::CurrentPCForStackPPC64(breakpad_thread_state_data_t state) { 512 ppc_thread_state64_t *machine_state = 513 reinterpret_cast<ppc_thread_state64_t *>(state); 514 515 return REGISTER_FROM_THREADSTATE(machine_state, srr0); 516} 517 518bool MinidumpGenerator::WriteContextPPC(breakpad_thread_state_data_t state, 519 MDLocationDescriptor *register_location) 520{ 521 TypedMDRVA<MDRawContextPPC> context(&writer_); 522 ppc_thread_state_t *machine_state = 
523 reinterpret_cast<ppc_thread_state_t *>(state); 524 525 if (!context.Allocate()) 526 return false; 527 528 *register_location = context.location(); 529 MDRawContextPPC *context_ptr = context.get(); 530 context_ptr->context_flags = MD_CONTEXT_PPC_BASE; 531 532#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a) 533#define AddGPR(a) context_ptr->gpr[a] = REGISTER_FROM_THREADSTATE(machine_state, r ## a) 534 535 AddReg(srr0); 536 AddReg(cr); 537 AddReg(xer); 538 AddReg(ctr); 539 AddReg(lr); 540 AddReg(vrsave); 541 542 AddGPR(0); 543 AddGPR(1); 544 AddGPR(2); 545 AddGPR(3); 546 AddGPR(4); 547 AddGPR(5); 548 AddGPR(6); 549 AddGPR(7); 550 AddGPR(8); 551 AddGPR(9); 552 AddGPR(10); 553 AddGPR(11); 554 AddGPR(12); 555 AddGPR(13); 556 AddGPR(14); 557 AddGPR(15); 558 AddGPR(16); 559 AddGPR(17); 560 AddGPR(18); 561 AddGPR(19); 562 AddGPR(20); 563 AddGPR(21); 564 AddGPR(22); 565 AddGPR(23); 566 AddGPR(24); 567 AddGPR(25); 568 AddGPR(26); 569 AddGPR(27); 570 AddGPR(28); 571 AddGPR(29); 572 AddGPR(30); 573 AddGPR(31); 574 AddReg(mq); 575#undef AddReg 576#undef AddGPR 577 578 return true; 579} 580 581bool MinidumpGenerator::WriteContextPPC64( 582 breakpad_thread_state_data_t state, 583 MDLocationDescriptor *register_location) { 584 TypedMDRVA<MDRawContextPPC64> context(&writer_); 585 ppc_thread_state64_t *machine_state = 586 reinterpret_cast<ppc_thread_state64_t *>(state); 587 588 if (!context.Allocate()) 589 return false; 590 591 *register_location = context.location(); 592 MDRawContextPPC64 *context_ptr = context.get(); 593 context_ptr->context_flags = MD_CONTEXT_PPC_BASE; 594 595#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a) 596#define AddGPR(a) context_ptr->gpr[a] = REGISTER_FROM_THREADSTATE(machine_state, r ## a) 597 598 AddReg(srr0); 599 AddReg(cr); 600 AddReg(xer); 601 AddReg(ctr); 602 AddReg(lr); 603 AddReg(vrsave); 604 605 AddGPR(0); 606 AddGPR(1); 607 AddGPR(2); 608 AddGPR(3); 609 AddGPR(4); 610 AddGPR(5); 611 
AddGPR(6); 612 AddGPR(7); 613 AddGPR(8); 614 AddGPR(9); 615 AddGPR(10); 616 AddGPR(11); 617 AddGPR(12); 618 AddGPR(13); 619 AddGPR(14); 620 AddGPR(15); 621 AddGPR(16); 622 AddGPR(17); 623 AddGPR(18); 624 AddGPR(19); 625 AddGPR(20); 626 AddGPR(21); 627 AddGPR(22); 628 AddGPR(23); 629 AddGPR(24); 630 AddGPR(25); 631 AddGPR(26); 632 AddGPR(27); 633 AddGPR(28); 634 AddGPR(29); 635 AddGPR(30); 636 AddGPR(31); 637#undef AddReg 638#undef AddGPR 639 640 return true; 641} 642 643#endif 644 645#ifdef HAS_X86_SUPPORT 646bool MinidumpGenerator::WriteStackX86(breakpad_thread_state_data_t state, 647 MDMemoryDescriptor *stack_location) { 648 i386_thread_state_t *machine_state = 649 reinterpret_cast<i386_thread_state_t *>(state); 650 651 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, esp); 652 return WriteStackFromStartAddress(start_addr, stack_location); 653} 654 655bool MinidumpGenerator::WriteStackX86_64(breakpad_thread_state_data_t state, 656 MDMemoryDescriptor *stack_location) { 657 x86_thread_state64_t *machine_state = 658 reinterpret_cast<x86_thread_state64_t *>(state); 659 660 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, rsp); 661 return WriteStackFromStartAddress(start_addr, stack_location); 662} 663 664u_int64_t 665MinidumpGenerator::CurrentPCForStackX86(breakpad_thread_state_data_t state) { 666 i386_thread_state_t *machine_state = 667 reinterpret_cast<i386_thread_state_t *>(state); 668 669 return REGISTER_FROM_THREADSTATE(machine_state, eip); 670} 671 672u_int64_t 673MinidumpGenerator::CurrentPCForStackX86_64(breakpad_thread_state_data_t state) { 674 x86_thread_state64_t *machine_state = 675 reinterpret_cast<x86_thread_state64_t *>(state); 676 677 return REGISTER_FROM_THREADSTATE(machine_state, rip); 678} 679 680bool MinidumpGenerator::WriteContextX86(breakpad_thread_state_data_t state, 681 MDLocationDescriptor *register_location) 682{ 683 TypedMDRVA<MDRawContextX86> context(&writer_); 684 i386_thread_state_t 
*machine_state = 685 reinterpret_cast<i386_thread_state_t *>(state); 686 687 if (!context.Allocate()) 688 return false; 689 690 *register_location = context.location(); 691 MDRawContextX86 *context_ptr = context.get(); 692 693#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a) 694 695 context_ptr->context_flags = MD_CONTEXT_X86; 696 AddReg(eax); 697 AddReg(ebx); 698 AddReg(ecx); 699 AddReg(edx); 700 AddReg(esi); 701 AddReg(edi); 702 AddReg(ebp); 703 AddReg(esp); 704 705 AddReg(cs); 706 AddReg(ds); 707 AddReg(ss); 708 AddReg(es); 709 AddReg(fs); 710 AddReg(gs); 711 AddReg(eflags); 712 713 AddReg(eip); 714#undef AddReg 715 716 return true; 717} 718 719bool MinidumpGenerator::WriteContextX86_64( 720 breakpad_thread_state_data_t state, 721 MDLocationDescriptor *register_location) { 722 TypedMDRVA<MDRawContextAMD64> context(&writer_); 723 x86_thread_state64_t *machine_state = 724 reinterpret_cast<x86_thread_state64_t *>(state); 725 726 if (!context.Allocate()) 727 return false; 728 729 *register_location = context.location(); 730 MDRawContextAMD64 *context_ptr = context.get(); 731 732#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a) 733 734 context_ptr->context_flags = MD_CONTEXT_AMD64; 735 AddReg(rax); 736 AddReg(rbx); 737 AddReg(rcx); 738 AddReg(rdx); 739 AddReg(rdi); 740 AddReg(rsi); 741 AddReg(rbp); 742 AddReg(rsp); 743 AddReg(r8); 744 AddReg(r9); 745 AddReg(r10); 746 AddReg(r11); 747 AddReg(r12); 748 AddReg(r13); 749 AddReg(r14); 750 AddReg(r15); 751 AddReg(rip); 752 // according to AMD's software developer guide, bits above 18 are 753 // not used in the flags register. Since the minidump format 754 // specifies 32 bits for the flags register, we can truncate safely 755 // with no loss. 
756 context_ptr->eflags = static_cast<u_int32_t>(REGISTER_FROM_THREADSTATE(machine_state, rflags)); 757 AddReg(cs); 758 AddReg(fs); 759 AddReg(gs); 760#undef AddReg 761 762 return true; 763} 764#endif 765 766bool MinidumpGenerator::GetThreadState(thread_act_t target_thread, 767 thread_state_t state, 768 mach_msg_type_number_t *count) { 769 thread_state_flavor_t flavor; 770 switch (cpu_type_) { 771#ifdef HAS_ARM_SUPPORT 772 case CPU_TYPE_ARM: 773 flavor = ARM_THREAD_STATE; 774 break; 775#endif 776#ifdef HAS_PPC_SUPPORT 777 case CPU_TYPE_POWERPC: 778 flavor = PPC_THREAD_STATE; 779 break; 780 case CPU_TYPE_POWERPC64: 781 flavor = PPC_THREAD_STATE64; 782 break; 783#endif 784#ifdef HAS_X86_SUPPORT 785 case CPU_TYPE_I386: 786 flavor = i386_THREAD_STATE; 787 break; 788 case CPU_TYPE_X86_64: 789 flavor = x86_THREAD_STATE64; 790 break; 791#endif 792 default: 793 return false; 794 } 795 return thread_get_state(target_thread, flavor, 796 state, count) == KERN_SUCCESS; 797} 798 799bool MinidumpGenerator::WriteThreadStream(mach_port_t thread_id, 800 MDRawThread *thread) { 801 breakpad_thread_state_data_t state; 802 mach_msg_type_number_t state_count 803 = static_cast<mach_msg_type_number_t>(sizeof(state)); 804 805 if (GetThreadState(thread_id, state, &state_count)) { 806 if (!WriteStack(state, &thread->stack)) 807 return false; 808 809 memory_blocks_.push_back(thread->stack); 810 811 if (!WriteContext(state, &thread->thread_context)) 812 return false; 813 814 thread->thread_id = thread_id; 815 } else { 816 return false; 817 } 818 819 return true; 820} 821 822bool MinidumpGenerator::WriteThreadListStream( 823 MDRawDirectory *thread_list_stream) { 824 TypedMDRVA<MDRawThreadList> list(&writer_); 825 thread_act_port_array_t threads_for_task; 826 mach_msg_type_number_t thread_count; 827 int non_generator_thread_count; 828 829 if (task_threads(crashing_task_, &threads_for_task, &thread_count)) 830 return false; 831 832 // Don't include the generator thread 833 if (handler_thread_ != 
MACH_PORT_NULL) 834 non_generator_thread_count = thread_count - 1; 835 else 836 non_generator_thread_count = thread_count; 837 if (!list.AllocateObjectAndArray(non_generator_thread_count, 838 sizeof(MDRawThread))) 839 return false; 840 841 thread_list_stream->stream_type = MD_THREAD_LIST_STREAM; 842 thread_list_stream->location = list.location(); 843 844 list.get()->number_of_threads = non_generator_thread_count; 845 846 MDRawThread thread; 847 int thread_idx = 0; 848 849 for (unsigned int i = 0; i < thread_count; ++i) { 850 memset(&thread, 0, sizeof(MDRawThread)); 851 852 if (threads_for_task[i] != handler_thread_) { 853 if (!WriteThreadStream(threads_for_task[i], &thread)) 854 return false; 855 856 list.CopyIndexAfterObject(thread_idx++, &thread, sizeof(MDRawThread)); 857 } 858 } 859 860 return true; 861} 862 863bool MinidumpGenerator::WriteMemoryListStream( 864 MDRawDirectory *memory_list_stream) { 865 TypedMDRVA<MDRawMemoryList> list(&writer_); 866 867 // If the dump has an exception, include some memory around the 868 // instruction pointer. 869 const size_t kIPMemorySize = 256; // bytes 870 bool have_ip_memory = false; 871 MDMemoryDescriptor ip_memory_d; 872 if (exception_thread_ && exception_type_) { 873 breakpad_thread_state_data_t state; 874 mach_msg_type_number_t stateCount 875 = static_cast<mach_msg_type_number_t>(sizeof(state)); 876 877 if (thread_get_state(exception_thread_, 878 BREAKPAD_MACHINE_THREAD_STATE, 879 state, 880 &stateCount) == KERN_SUCCESS) { 881 u_int64_t ip = CurrentPCForStack(state); 882 // Bound it to the upper and lower bounds of the region 883 // it's contained within. If it's not in a known memory region, 884 // don't bother trying to write it. 
885 mach_vm_address_t addr = ip; 886 mach_vm_size_t size; 887 natural_t nesting_level = 0; 888 vm_region_submap_info_64 info; 889 mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64; 890 891 kern_return_t ret = 892 mach_vm_region_recurse(crashing_task_, 893 &addr, 894 &size, 895 &nesting_level, 896 (vm_region_recurse_info_t)&info, 897 &info_count); 898 if (ret == KERN_SUCCESS && ip >= addr && ip < (addr + size)) { 899 // Try to get 128 bytes before and after the IP, but 900 // settle for whatever's available. 901 ip_memory_d.start_of_memory_range = 902 std::max(uintptr_t(addr), 903 uintptr_t(ip - (kIPMemorySize / 2))); 904 uintptr_t end_of_range = 905 std::min(uintptr_t(ip + (kIPMemorySize / 2)), 906 uintptr_t(addr + size)); 907 ip_memory_d.memory.data_size = 908 end_of_range - ip_memory_d.start_of_memory_range; 909 have_ip_memory = true; 910 // This needs to get appended to the list even though 911 // the memory bytes aren't filled in yet so the entire 912 // list can be written first. The memory bytes will get filled 913 // in after the memory list is written. 914 memory_blocks_.push_back(ip_memory_d); 915 } 916 } 917 } 918 919 // Now fill in the memory list and write it. 920 unsigned memory_count = memory_blocks_.size(); 921 if (!list.AllocateObjectAndArray(memory_count, 922 sizeof(MDMemoryDescriptor))) 923 return false; 924 925 memory_list_stream->stream_type = MD_MEMORY_LIST_STREAM; 926 memory_list_stream->location = list.location(); 927 928 list.get()->number_of_memory_ranges = memory_count; 929 930 unsigned int i; 931 for (i = 0; i < memory_count; ++i) { 932 list.CopyIndexAfterObject(i, &memory_blocks_[i], 933 sizeof(MDMemoryDescriptor)); 934 } 935 936 if (have_ip_memory) { 937 // Now read the memory around the instruction pointer. 938 UntypedMDRVA ip_memory(&writer_); 939 if (!ip_memory.Allocate(ip_memory_d.memory.data_size)) 940 return false; 941 942 if (dynamic_images_) { 943 // Out-of-process. 
944 vector<uint8_t> memory; 945 if (ReadTaskMemory(crashing_task_, 946 ip_memory_d.start_of_memory_range, 947 ip_memory_d.memory.data_size, 948 memory) != KERN_SUCCESS) { 949 return false; 950 } 951 952 ip_memory.Copy(&memory[0], ip_memory_d.memory.data_size); 953 } else { 954 // In-process, just copy from local memory. 955 ip_memory.Copy( 956 reinterpret_cast<const void *>(ip_memory_d.start_of_memory_range), 957 ip_memory_d.memory.data_size); 958 } 959 960 ip_memory_d.memory = ip_memory.location(); 961 // Write this again now that the data location is filled in. 962 list.CopyIndexAfterObject(i - 1, &ip_memory_d, 963 sizeof(MDMemoryDescriptor)); 964 } 965 966 return true; 967} 968 969bool 970MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) { 971 TypedMDRVA<MDRawExceptionStream> exception(&writer_); 972 973 if (!exception.Allocate()) 974 return false; 975 976 exception_stream->stream_type = MD_EXCEPTION_STREAM; 977 exception_stream->location = exception.location(); 978 MDRawExceptionStream *exception_ptr = exception.get(); 979 exception_ptr->thread_id = exception_thread_; 980 981 // This naming is confusing, but it is the proper translation from 982 // mach naming to minidump naming. 
  // (Tail of WriteExceptionStream; the function and its allocation begin
  // earlier in the file.)
  // NOTE(review): the minidump field names do not line up with the Mach
  // semantics here — the Mach exception *type* is stored in exception_code
  // and the Mach exception *code* in exception_flags. This appears to be the
  // established Breakpad convention; confirm against the processor side
  // before changing.
  exception_ptr->exception_record.exception_code = exception_type_;
  exception_ptr->exception_record.exception_flags = exception_code_;

  breakpad_thread_state_data_t state;
  mach_msg_type_number_t state_count
      = static_cast<mach_msg_type_number_t>(sizeof(state));

  // Capture the register state of the thread that raised the exception.
  if (!GetThreadState(exception_thread_, state, &state_count))
    return false;

  if (!WriteContext(state, &exception_ptr->thread_context))
    return false;

  // For bad-access faults the faulting address was delivered as the exception
  // subcode; for all other exception types fall back to the thread's program
  // counter extracted from the captured state.
  if (exception_type_ == EXC_BAD_ACCESS)
    exception_ptr->exception_record.exception_address = exception_subcode_;
  else
    exception_ptr->exception_record.exception_address = CurrentPCForStack(state);

  return true;
}

// Writes the MD_SYSTEM_INFO_STREAM: CPU architecture and identification,
// processor count, OS platform id, OS version numbers, and the OS build
// string (stored via csd_version_rva).
bool MinidumpGenerator::WriteSystemInfoStream(
    MDRawDirectory *system_info_stream) {
  TypedMDRVA<MDRawSystemInfo> info(&writer_);

  if (!info.Allocate())
    return false;

  system_info_stream->stream_type = MD_SYSTEM_INFO_STREAM;
  system_info_stream->location = info.location();

  // CPU Information
  uint32_t number_of_processors;
  size_t len = sizeof(number_of_processors);
  sysctlbyname("hw.ncpu", &number_of_processors, &len, NULL, 0);
  MDRawSystemInfo *info_ptr = info.get();

  // Map the Mach CPU type (of the crashed task) to the minidump architecture
  // constant. Per-architecture cases are compiled in only when support for
  // that architecture was built.
  switch (cpu_type_) {
#ifdef HAS_ARM_SUPPORT
    case CPU_TYPE_ARM:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_ARM;
      break;
#endif
#ifdef HAS_PPC_SUPPORT
    case CPU_TYPE_POWERPC:
    case CPU_TYPE_POWERPC64:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_PPC;
      break;
#endif
#ifdef HAS_X86_SUPPORT
    case CPU_TYPE_I386:
    case CPU_TYPE_X86_64:
      if (cpu_type_ == CPU_TYPE_I386)
        info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_X86;
      else
        info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_AMD64;
#ifdef __i386__
      // ebx is used for PIC code, so we need to preserve it: push it, run
      // cpuid, copy the result out through a scratch operand, and restore it.
#define cpuid(op,eax,ebx,ecx,edx) \
  asm ("pushl %%ebx \n\t" \
       "cpuid \n\t" \
       "movl %%ebx,%1 \n\t" \
       "popl %%ebx" \
       : "=a" (eax), \
         "=g" (ebx), \
         "=c" (ecx), \
         "=d" (edx) \
       : "0" (op))
#elif defined(__x86_64__)

      // On x86_64 EBX is not the PIC register, so cpuid can write it directly.
#define cpuid(op,eax,ebx,ecx,edx) \
  asm ("cpuid \n\t" \
       : "=a" (eax), \
         "=b" (ebx), \
         "=c" (ecx), \
         "=d" (edx) \
       : "0" (op))
#endif

#if defined(__i386__) || defined(__x86_64__)
      int unused, unused2;
      // get vendor id — CPUID leaf 0 returns the vendor string in the
      // register order EBX, EDX, ECX, which is why the ECX output lands in
      // vendor_id[2] and the EDX output in vendor_id[1].
      cpuid(0, unused, info_ptr->cpu.x86_cpu_info.vendor_id[0],
            info_ptr->cpu.x86_cpu_info.vendor_id[2],
            info_ptr->cpu.x86_cpu_info.vendor_id[1]);
      // get version and feature info (CPUID leaf 1: EAX = version/signature,
      // EDX = feature flags; EBX/ECX results are discarded here).
      cpuid(1, info_ptr->cpu.x86_cpu_info.version_information, unused, unused2,
            info_ptr->cpu.x86_cpu_info.feature_information);

      // family (bits 11:8 of the CPUID signature)
      info_ptr->processor_level =
        (info_ptr->cpu.x86_cpu_info.version_information & 0xF00) >> 8;
      // 0xMMSS (Model, Stepping): stepping is bits 3:0, model is bits 7:4
      // shifted into the high byte of processor_revision.
      info_ptr->processor_revision =
        (info_ptr->cpu.x86_cpu_info.version_information & 0xF) |
        ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0) << 4);

      // decode extended model info — for family 0xF and 0x6 the extended
      // model bits (19:16) are prepended to the model nibble.
      if (info_ptr->processor_level == 0xF ||
          info_ptr->processor_level == 0x6) {
        info_ptr->processor_revision |=
          ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0000) >> 4);
      }

      // decode extended family info — for family 0xF the extended family
      // bits (27:20) are added to the base family.
      if (info_ptr->processor_level == 0xF) {
        info_ptr->processor_level +=
          ((info_ptr->cpu.x86_cpu_info.version_information & 0xFF00000) >> 20);
      }

#endif  // __i386__ || __x86_64__
      break;
#endif  // HAS_X86_SUPPORT
    default:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_UNKNOWN;
      break;
  }

  info_ptr->number_of_processors = number_of_processors;
#if TARGET_OS_IPHONE
  info_ptr->platform_id = MD_OS_IOS;
#else
  info_ptr->platform_id = MD_OS_MAC_OS_X;
#endif  // TARGET_OS_IPHONE

  MDLocationDescriptor build_string_loc;

  // The OS build string (gathered earlier by GatherSystemInformation) is
  // written as a minidump string and referenced by RVA.
  if (!writer_.WriteString(build_string_, 0,
                           &build_string_loc))
    return false;

  info_ptr->csd_version_rva = build_string_loc.rva;
  info_ptr->major_version = os_major_version_;
  info_ptr->minor_version = os_minor_version_;
  info_ptr->build_number = os_build_number_;

  return true;
}

// Fills in one MDRawModule for the image at |index|. Two paths:
//  - out-of-process (dynamic_images_ set): module info comes from the
//    DynamicImages snapshot of the crashed task;
//  - in-process: module info comes from dyld, by walking the image's Mach-O
//    load commands to find the __TEXT segment.
bool MinidumpGenerator::WriteModuleStream(unsigned int index,
                                          MDRawModule *module) {
  if (dynamic_images_) {
    // we're in a different process than the crashed process
    DynamicImage *image = dynamic_images_->GetImage(index);

    if (!image)
      return false;

    memset(module, 0, sizeof(MDRawModule));

    MDLocationDescriptor string_location;

    string name = image->GetFilePath();
    if (!writer_.WriteString(name.c_str(), 0, &string_location))
      return false;

    // Actual load address = preferred VM address + ASLR slide.
    module->base_of_image = image->GetVMAddr() + image->GetVMAddrSlide();
    module->size_of_image = static_cast<u_int32_t>(image->GetVMSize());
    module->module_name_rva = string_location.rva;

    // We'll skip the executable module, because they don't have
    // LC_ID_DYLIB load commands, and the crash processing server gets
    // version information from the Plist file, anyway.
    if (index != (uint32_t)FindExecutableModule()) {
      module->version_info.signature = MD_VSFIXEDFILEINFO_SIGNATURE;
      module->version_info.struct_version |= MD_VSFIXEDFILEINFO_VERSION;
      // Convert MAC dylib version format, which is a 32 bit number, to the
      // format used by minidump.  The mac format is <16 bits>.<8 bits>.<8 bits>
      // so it fits nicely into the windows version with some massaging
      // The mapping is:
      //    1) upper 16 bits of MAC version go to lower 16 bits of product HI
      //    2) Next most significant 8 bits go to upper 16 bits of product LO
      //    3) Least significant 8 bits go to lower 16 bits of product LO
      uint32_t modVersion = image->GetVersion();
      // NOTE(review): this first assignment is immediately overwritten by the
      // next line (dead store); harmless because module was memset above, but
      // one of the two lines is redundant.
      module->version_info.file_version_hi = 0;
      module->version_info.file_version_hi = modVersion >> 16;
      module->version_info.file_version_lo |= (modVersion & 0xff00) << 8;
      module->version_info.file_version_lo |= (modVersion & 0xff);
    }

    if (!WriteCVRecord(module, image->GetCPUType(), name.c_str(), false)) {
      return false;
    }
  } else {
    // Getting module info in the crashed process
    const breakpad_mach_header *header;
    header = (breakpad_mach_header*)_dyld_get_image_header(index);
    if (!header)
      return false;

    // Sanity-check the Mach-O magic for the build's pointer width; the
    // assert catches this in debug builds, the runtime check in release.
#ifdef __LP64__
    assert(header->magic == MH_MAGIC_64);

    if(header->magic != MH_MAGIC_64)
      return false;
#else
    assert(header->magic == MH_MAGIC);

    if(header->magic != MH_MAGIC)
      return false;
#endif

    int cpu_type = header->cputype;
    unsigned long slide = _dyld_get_image_vmaddr_slide(index);
    const char* name = _dyld_get_image_name(index);
    // Load commands start immediately after the Mach-O header.
    const struct load_command *cmd =
        reinterpret_cast<const struct load_command *>(header + 1);

    memset(module, 0, sizeof(MDRawModule));

    // Walk the load commands looking for the __TEXT segment, which provides
    // the image's base address and size.
    for (unsigned int i = 0; cmd && (i < header->ncmds); i++) {
      if (cmd->cmd == LC_SEGMENT_ARCH) {

        const breakpad_mach_segment_command *seg =
            reinterpret_cast<const breakpad_mach_segment_command *>(cmd);

        if (!strcmp(seg->segname, "__TEXT")) {
          MDLocationDescriptor string_location;

          if (!writer_.WriteString(name, 0, &string_location))
            return false;

          module->base_of_image = seg->vmaddr + slide;
          module->size_of_image = static_cast<u_int32_t>(seg->vmsize);
          module->module_name_rva = string_location.rva;

          bool in_memory = false;
#if TARGET_OS_IPHONE
          // On iOS the module file may not be readable from disk, so the
          // identifier is computed from the in-memory image instead.
          in_memory = true;
#endif
          if (!WriteCVRecord(module, cpu_type, name, in_memory))
            return false;

          // Done once the __TEXT segment has been processed.
          return true;
        }
      }

      // Advance to the next load command by its recorded size.
      cmd = reinterpret_cast<struct load_command*>((char *)cmd + cmd->cmdsize);
    }
  }

  // NOTE(review): in the in-process path, falling out of the loop (no __TEXT
  // segment found) still returns true with the module left mostly zeroed.
  return true;
}

// Returns the image index of the main executable (MH_EXECUTE), using the
// DynamicImages snapshot when dumping another task, or dyld when dumping
// in-process. Falls back to index 0 if no executable image is found.
int MinidumpGenerator::FindExecutableModule() {
  if (dynamic_images_) {
    int index = dynamic_images_->GetExecutableImageIndex();

    if (index >= 0) {
      return index;
    }
  } else {
    int image_count = _dyld_image_count();
    const struct mach_header *header;

    for (int index = 0; index < image_count; ++index) {
      header = _dyld_get_image_header(index);

      if (header->filetype == MH_EXECUTE)
        return index;
    }
  }

  // failed - just use the first image
  return 0;
}

// Writes the CodeView (MDCVInfoPDB70) record for |module|: the module's
// basename plus a 16-byte identifier (Mach-O UUID, or an MD5/FileID hash as
// fallback) packed into the signature GUID.
bool MinidumpGenerator::WriteCVRecord(MDRawModule *module, int cpu_type,
                                      const char *module_path, bool in_memory) {
  TypedMDRVA<MDCVInfoPDB70> cv(&writer_);

  // Only return the last path component of the full module path
  const char *module_name = strrchr(module_path, '/');

  // Increment past the slash
  if (module_name)
    ++module_name;
  else
    module_name = "<Unknown>";

  size_t module_name_length = strlen(module_name);

  // Allocate the record plus space for the NUL-terminated module name.
  if (!cv.AllocateObjectAndArray(module_name_length + 1, sizeof(u_int8_t)))
    return false;

  if (!cv.CopyIndexAfterObject(0, module_name, module_name_length))
    return false;

  module->cv_record = cv.location();
  MDCVInfoPDB70 *cv_ptr = cv.get();
  cv_ptr->cv_signature = MD_CVINFOPDB70_SIGNATURE;
  cv_ptr->age = 0;

  // Get the module identifier
  unsigned char identifier[16];
  bool result = false;
  if (in_memory) {
    // Identify the module from its in-memory image (used on iOS, where the
    // file may not be readable): prefer the LC_UUID command, fall back to an
    // MD5 of the image.
    MacFileUtilities::MachoID macho(module_path,
        reinterpret_cast<void *>(module->base_of_image),
        static_cast<size_t>(module->size_of_image));
    result = macho.UUIDCommand(cpu_type, identifier);
    if (!result)
      result = macho.MD5(cpu_type, identifier);
  }

  // If the in-memory lookup failed (or wasn't requested), identify the
  // module from its file on disk.
  if (!result) {
    FileID file_id(module_path);
    result = file_id.MachoIdentifier(cpu_type, identifier);
  }

  if (result) {
    // Pack the 16 identifier bytes into the GUID fields; data1..data3 are
    // assembled big-endian from the leading bytes, data4 takes the rest.
    cv_ptr->signature.data1 = (uint32_t)identifier[0] << 24 |
      (uint32_t)identifier[1] << 16 | (uint32_t)identifier[2] << 8 |
      (uint32_t)identifier[3];
    cv_ptr->signature.data2 = (uint32_t)identifier[4] << 8 | identifier[5];
    cv_ptr->signature.data3 = (uint32_t)identifier[6] << 8 | identifier[7];
    cv_ptr->signature.data4[0] = identifier[8];
    cv_ptr->signature.data4[1] = identifier[9];
    cv_ptr->signature.data4[2] = identifier[10];
    cv_ptr->signature.data4[3] = identifier[11];
    cv_ptr->signature.data4[4] = identifier[12];
    cv_ptr->signature.data4[5] = identifier[13];
    cv_ptr->signature.data4[6] = identifier[14];
    cv_ptr->signature.data4[7] = identifier[15];
  }

  // NOTE(review): returns true even when no identifier could be computed —
  // the record is still written, with the signature GUID left zeroed.
  return true;
}

// Writes the MD_MODULE_LIST_STREAM: one MDRawModule per loaded image, with
// the main executable deliberately placed first in the list.
bool MinidumpGenerator::WriteModuleListStream(
    MDRawDirectory *module_list_stream) {
  TypedMDRVA<MDRawModuleList> list(&writer_);

  // Image count comes from the snapshot when dumping another task, from
  // dyld when dumping in-process.
  int image_count = dynamic_images_ ?
      dynamic_images_->GetImageCount() : _dyld_image_count();

  if (!list.AllocateObjectAndArray(image_count, MD_MODULE_SIZE))
    return false;

  module_list_stream->stream_type = MD_MODULE_LIST_STREAM;
  module_list_stream->location = list.location();
  list.get()->number_of_modules = image_count;

  // Write out the executable module as the first one
  MDRawModule module;
  int executableIndex = FindExecutableModule();

  if (!WriteModuleStream(executableIndex, &module)) {
    return false;
  }

  list.CopyIndexAfterObject(0, &module, MD_MODULE_SIZE);
  int destinationIndex = 1;  // Write all other modules after this one

  for (int i = 0; i < image_count; ++i) {
    if (i != executableIndex) {
      if (!WriteModuleStream(i, &module)) {
        return false;
      }

      list.CopyIndexAfterObject(destinationIndex++, &module, MD_MODULE_SIZE);
    }
  }

  return true;
}

// Writes the MD_MISC_INFO_STREAM: process id, user/kernel CPU times, process
// start time, and CPU frequency information.
bool MinidumpGenerator::WriteMiscInfoStream(MDRawDirectory *misc_info_stream) {
  TypedMDRVA<MDRawMiscInfo> info(&writer_);

  if (!info.Allocate())
    return false;

  misc_info_stream->stream_type = MD_MISC_INFO_STREAM;
  misc_info_stream->location = info.location();

  MDRawMiscInfo *info_ptr = info.get();
  info_ptr->size_of_info = static_cast<u_int32_t>(sizeof(MDRawMiscInfo));
  // Declare which fields of the record are valid.
  info_ptr->flags1 = MD_MISCINFO_FLAGS1_PROCESS_ID |
    MD_MISCINFO_FLAGS1_PROCESS_TIMES |
    MD_MISCINFO_FLAGS1_PROCESSOR_POWER_INFO;

  // Process ID — note this is the *current* process, which for out-of-process
  // dumps is the handler, not the crashed task.
  info_ptr->process_id = getpid();

  // Times
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != -1) {
    // Omit the fractional time since the MDRawMiscInfo only wants seconds
    info_ptr->process_user_time =
        static_cast<u_int32_t>(usage.ru_utime.tv_sec);
    info_ptr->process_kernel_time =
        static_cast<u_int32_t>(usage.ru_stime.tv_sec);
  }
  // Look up the process start time via the two-phase sysctl(KERN_PROC)
  // pattern: first query the required buffer size, then fetch the
  // kinfo_proc record. sysctl returns 0 on success, hence the `!sysctl`.
  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID,
                 static_cast<int>(info_ptr->process_id) };
  u_int mibsize = static_cast<u_int>(sizeof(mib) / sizeof(mib[0]));
  size_t size;
  if (!sysctl(mib, mibsize, NULL, &size, NULL, 0)) {
    // mach_vm_allocate is used (rather than malloc) for the buffer;
    // presumably to avoid the heap while dumping — confirm.
    mach_vm_address_t addr;
    if (mach_vm_allocate(mach_task_self(),
                         &addr,
                         size,
                         true) == KERN_SUCCESS) {
      struct kinfo_proc *proc = (struct kinfo_proc *)addr;
      if (!sysctl(mib, mibsize, proc, &size, NULL, 0))
        info_ptr->process_create_time =
            static_cast<u_int32_t>(proc->kp_proc.p_starttime.tv_sec);
      mach_vm_deallocate(mach_task_self(), addr, size);
    }
  }

  // Speed — CPU frequencies in Hz, converted to MHz for the record. On
  // failure sysctlbyname leaves |speed| unchanged (first call: uninitialized).
  uint64_t speed;
  const uint64_t kOneMillion = 1000 * 1000;
  size = sizeof(speed);
  sysctlbyname("hw.cpufrequency_max", &speed, &size, NULL, 0);
  info_ptr->processor_max_mhz = static_cast<u_int32_t>(speed / kOneMillion);
  info_ptr->processor_mhz_limit = static_cast<u_int32_t>(speed / kOneMillion);
  size = sizeof(speed);
  sysctlbyname("hw.cpufrequency", &speed, &size, NULL, 0);
  info_ptr->processor_current_mhz = static_cast<u_int32_t>(speed / kOneMillion);

  return true;
}

// Writes the Breakpad-specific MD_BREAKPAD_INFO_STREAM identifying which
// thread wrote the dump (the handler) and, when a real exception occurred,
// which thread requested it (the crashed thread).
bool MinidumpGenerator::WriteBreakpadInfoStream(
    MDRawDirectory *breakpad_info_stream) {
  TypedMDRVA<MDRawBreakpadInfo> info(&writer_);

  if (!info.Allocate())
    return false;

  breakpad_info_stream->stream_type = MD_BREAKPAD_INFO_STREAM;
  breakpad_info_stream->location = info.location();
  MDRawBreakpadInfo *info_ptr = info.get();

  if (exception_thread_ && exception_type_) {
    // Crash dump: both the dumping thread and the crashed thread are known.
    info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID |
                         MD_BREAKPAD_INFO_VALID_REQUESTING_THREAD_ID;
    info_ptr->dump_thread_id = handler_thread_;
    info_ptr->requesting_thread_id = exception_thread_;
  } else {
    // On-demand dump (no exception): only the dumping thread is meaningful.
    info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID;
    info_ptr->dump_thread_id = handler_thread_;
    info_ptr->requesting_thread_id = 0;
  }

  return true;
}

}  // namespace google_breakpad