DNBArchImplI386.cpp revision e39356825b86cd7484097ca4c4c9f07f9ff95e2e
1//===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// Created by Greg Clayton on 6/25/07. 11// 12//===----------------------------------------------------------------------===// 13 14#if defined (__i386__) || defined (__x86_64__) 15 16#include <sys/cdefs.h> 17 18#include "MacOSX/i386/DNBArchImplI386.h" 19#include "DNBLog.h" 20#include "MachThread.h" 21#include "MachProcess.h" 22 23#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG) 24enum debugState { 25 debugStateUnknown, 26 debugStateOff, 27 debugStateOn 28}; 29 30static debugState sFPUDebugState = debugStateUnknown; 31static debugState sAVXForceState = debugStateUnknown; 32 33static bool DebugFPURegs () 34{ 35 if (sFPUDebugState == debugStateUnknown) 36 { 37 if (getenv("DNB_DEBUG_FPU_REGS")) 38 sFPUDebugState = debugStateOn; 39 else 40 sFPUDebugState = debugStateOff; 41 } 42 43 return (sFPUDebugState == debugStateOn); 44} 45 46static bool ForceAVXRegs () 47{ 48 if (sFPUDebugState == debugStateUnknown) 49 { 50 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS")) 51 sAVXForceState = debugStateOn; 52 else 53 sAVXForceState = debugStateOff; 54 } 55 56 return (sAVXForceState == debugStateOn); 57} 58 59#define DEBUG_FPU_REGS (DebugFPURegs()) 60#define FORCE_AVX_REGS (ForceAVXRegs()) 61#else 62#define DEBUG_FPU_REGS (0) 63#define FORCE_AVX_REGS (0) 64#endif 65 66enum 67{ 68 gpr_eax = 0, 69 gpr_ebx = 1, 70 gpr_ecx = 2, 71 gpr_edx = 3, 72 gpr_edi = 4, 73 gpr_esi = 5, 74 gpr_ebp = 6, 75 gpr_esp = 7, 76 gpr_ss = 8, 77 gpr_eflags = 9, 78 gpr_eip = 10, 79 gpr_cs = 11, 80 gpr_ds = 12, 81 gpr_es = 13, 82 gpr_fs = 14, 83 gpr_gs = 15, 84 k_num_gpr_regs 85}; 86 87enum { 88 fpu_fcw, 89 fpu_fsw, 90 fpu_ftw, 91 fpu_fop, 92 fpu_ip, 93 
fpu_cs, 94 fpu_dp, 95 fpu_ds, 96 fpu_mxcsr, 97 fpu_mxcsrmask, 98 fpu_stmm0, 99 fpu_stmm1, 100 fpu_stmm2, 101 fpu_stmm3, 102 fpu_stmm4, 103 fpu_stmm5, 104 fpu_stmm6, 105 fpu_stmm7, 106 fpu_xmm0, 107 fpu_xmm1, 108 fpu_xmm2, 109 fpu_xmm3, 110 fpu_xmm4, 111 fpu_xmm5, 112 fpu_xmm6, 113 fpu_xmm7, 114 fpu_ymm0, 115 fpu_ymm1, 116 fpu_ymm2, 117 fpu_ymm3, 118 fpu_ymm4, 119 fpu_ymm5, 120 fpu_ymm6, 121 fpu_ymm7, 122 k_num_fpu_regs, 123 124 // Aliases 125 fpu_fctrl = fpu_fcw, 126 fpu_fstat = fpu_fsw, 127 fpu_ftag = fpu_ftw, 128 fpu_fiseg = fpu_cs, 129 fpu_fioff = fpu_ip, 130 fpu_foseg = fpu_ds, 131 fpu_fooff = fpu_dp 132}; 133 134enum { 135 exc_trapno, 136 exc_err, 137 exc_faultvaddr, 138 k_num_exc_regs, 139}; 140 141 142enum 143{ 144 gcc_eax = 0, 145 gcc_ecx, 146 gcc_edx, 147 gcc_ebx, 148 gcc_ebp, 149 gcc_esp, 150 gcc_esi, 151 gcc_edi, 152 gcc_eip, 153 gcc_eflags 154}; 155 156enum 157{ 158 dwarf_eax = 0, 159 dwarf_ecx, 160 dwarf_edx, 161 dwarf_ebx, 162 dwarf_esp, 163 dwarf_ebp, 164 dwarf_esi, 165 dwarf_edi, 166 dwarf_eip, 167 dwarf_eflags, 168 dwarf_stmm0 = 11, 169 dwarf_stmm1, 170 dwarf_stmm2, 171 dwarf_stmm3, 172 dwarf_stmm4, 173 dwarf_stmm5, 174 dwarf_stmm6, 175 dwarf_stmm7, 176 dwarf_xmm0 = 21, 177 dwarf_xmm1, 178 dwarf_xmm2, 179 dwarf_xmm3, 180 dwarf_xmm4, 181 dwarf_xmm5, 182 dwarf_xmm6, 183 dwarf_xmm7, 184 dwarf_ymm0 = dwarf_xmm0, 185 dwarf_ymm1 = dwarf_xmm1, 186 dwarf_ymm2 = dwarf_xmm2, 187 dwarf_ymm3 = dwarf_xmm3, 188 dwarf_ymm4 = dwarf_xmm4, 189 dwarf_ymm5 = dwarf_xmm5, 190 dwarf_ymm6 = dwarf_xmm6, 191 dwarf_ymm7 = dwarf_xmm7, 192}; 193 194enum 195{ 196 gdb_eax = 0, 197 gdb_ecx = 1, 198 gdb_edx = 2, 199 gdb_ebx = 3, 200 gdb_esp = 4, 201 gdb_ebp = 5, 202 gdb_esi = 6, 203 gdb_edi = 7, 204 gdb_eip = 8, 205 gdb_eflags = 9, 206 gdb_cs = 10, 207 gdb_ss = 11, 208 gdb_ds = 12, 209 gdb_es = 13, 210 gdb_fs = 14, 211 gdb_gs = 15, 212 gdb_stmm0 = 16, 213 gdb_stmm1 = 17, 214 gdb_stmm2 = 18, 215 gdb_stmm3 = 19, 216 gdb_stmm4 = 20, 217 gdb_stmm5 = 21, 218 gdb_stmm6 = 22, 219 
gdb_stmm7 = 23, 220 gdb_fctrl = 24, gdb_fcw = gdb_fctrl, 221 gdb_fstat = 25, gdb_fsw = gdb_fstat, 222 gdb_ftag = 26, gdb_ftw = gdb_ftag, 223 gdb_fiseg = 27, gdb_fpu_cs = gdb_fiseg, 224 gdb_fioff = 28, gdb_ip = gdb_fioff, 225 gdb_foseg = 29, gdb_fpu_ds = gdb_foseg, 226 gdb_fooff = 30, gdb_dp = gdb_fooff, 227 gdb_fop = 31, 228 gdb_xmm0 = 32, 229 gdb_xmm1 = 33, 230 gdb_xmm2 = 34, 231 gdb_xmm3 = 35, 232 gdb_xmm4 = 36, 233 gdb_xmm5 = 37, 234 gdb_xmm6 = 38, 235 gdb_xmm7 = 39, 236 gdb_mxcsr = 40, 237 gdb_mm0 = 41, 238 gdb_mm1 = 42, 239 gdb_mm2 = 43, 240 gdb_mm3 = 44, 241 gdb_mm4 = 45, 242 gdb_mm5 = 46, 243 gdb_mm6 = 47, 244 gdb_mm7 = 48, 245 gdb_ymm0 = gdb_xmm0, 246 gdb_ymm1 = gdb_xmm1, 247 gdb_ymm2 = gdb_xmm2, 248 gdb_ymm3 = gdb_xmm3, 249 gdb_ymm4 = gdb_xmm4, 250 gdb_ymm5 = gdb_xmm5, 251 gdb_ymm6 = gdb_xmm6, 252 gdb_ymm7 = gdb_xmm7 253}; 254 255enum DNBArchImplI386::AVXPresence DNBArchImplI386::s_has_avx = DNBArchImplI386::kAVXUnknown; 256 257uint64_t 258DNBArchImplI386::GetPC(uint64_t failValue) 259{ 260 // Get program counter 261 if (GetGPRState(false) == KERN_SUCCESS) 262 return m_state.context.gpr.__eip; 263 return failValue; 264} 265 266kern_return_t 267DNBArchImplI386::SetPC(uint64_t value) 268{ 269 // Get program counter 270 kern_return_t err = GetGPRState(false); 271 if (err == KERN_SUCCESS) 272 { 273 m_state.context.gpr.__eip = value; 274 err = SetGPRState(); 275 } 276 return err == KERN_SUCCESS; 277} 278 279uint64_t 280DNBArchImplI386::GetSP(uint64_t failValue) 281{ 282 // Get stack pointer 283 if (GetGPRState(false) == KERN_SUCCESS) 284 return m_state.context.gpr.__esp; 285 return failValue; 286} 287 288// Uncomment the value below to verify the values in the debugger. 
289//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 290//#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg 291 292kern_return_t 293DNBArchImplI386::GetGPRState(bool force) 294{ 295 if (force || m_state.GetError(e_regSetGPR, Read)) 296 { 297#if DEBUG_GPR_VALUES 298 SET_GPR(eax); 299 SET_GPR(ebx); 300 SET_GPR(ecx); 301 SET_GPR(edx); 302 SET_GPR(edi); 303 SET_GPR(esi); 304 SET_GPR(ebp); 305 SET_GPR(esp); 306 SET_GPR(ss); 307 SET_GPR(eflags); 308 SET_GPR(eip); 309 SET_GPR(cs); 310 SET_GPR(ds); 311 SET_GPR(es); 312 SET_GPR(fs); 313 SET_GPR(gs); 314 m_state.SetError(e_regSetGPR, Read, 0); 315#else 316 mach_msg_type_number_t count = e_regSetWordSizeGPR; 317 m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count)); 318#endif 319 } 320 return m_state.GetError(e_regSetGPR, Read); 321} 322 323// Uncomment the value below to verify the values in the debugger. 324//#define DEBUG_FPU_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 325 326kern_return_t 327DNBArchImplI386::GetFPUState(bool force) 328{ 329 if (force || m_state.GetError(e_regSetFPU, Read)) 330 { 331 if (DEBUG_FPU_REGS) 332 { 333 if (CPUHasAVX() || FORCE_AVX_REGS) 334 { 335 m_state.context.fpu.avx.__fpu_reserved[0] = -1; 336 m_state.context.fpu.avx.__fpu_reserved[1] = -1; 337 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234; 338 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678; 339 m_state.context.fpu.avx.__fpu_ftw = 1; 340 m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX; 341 m_state.context.fpu.avx.__fpu_fop = 2; 342 m_state.context.fpu.avx.__fpu_ip = 3; 343 m_state.context.fpu.avx.__fpu_cs = 4; 344 m_state.context.fpu.avx.__fpu_rsrv2 = 5; 345 m_state.context.fpu.avx.__fpu_dp = 6; 346 m_state.context.fpu.avx.__fpu_ds = 7; 347 m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX; 348 m_state.context.fpu.avx.__fpu_mxcsr = 8; 349 m_state.context.fpu.avx.__fpu_mxcsrmask = 9; 350 
int i; 351 for (i=0; i<16; ++i) 352 { 353 if (i<10) 354 { 355 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 356 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 357 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 358 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 359 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 360 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 361 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 362 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 363 } 364 else 365 { 366 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 367 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 368 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 369 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 370 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 371 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 372 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 373 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 374 } 375 376 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0'; 377 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1'; 378 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2'; 379 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3'; 380 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4'; 381 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5'; 382 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6'; 383 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7'; 384 } 385 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 386 m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN; 387 m_state.context.fpu.avx.__fpu_reserved1 = -1; 388 for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i) 389 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 390 391 for (i = 0; i < 16; ++i) 392 { 393 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0'; 394 
m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1'; 395 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2'; 396 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3'; 397 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4'; 398 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5'; 399 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6'; 400 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7'; 401 } 402 } 403 else 404 { 405 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 406 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 407 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 408 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 409 m_state.context.fpu.no_avx.__fpu_ftw = 1; 410 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 411 m_state.context.fpu.no_avx.__fpu_fop = 2; 412 m_state.context.fpu.no_avx.__fpu_ip = 3; 413 m_state.context.fpu.no_avx.__fpu_cs = 4; 414 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 415 m_state.context.fpu.no_avx.__fpu_dp = 6; 416 m_state.context.fpu.no_avx.__fpu_ds = 7; 417 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 418 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 419 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 420 int i; 421 for (i=0; i<16; ++i) 422 { 423 if (i<10) 424 { 425 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 426 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 427 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 428 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 429 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 430 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 431 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 432 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 433 } 434 else 435 { 436 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 437 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 438 
m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 439 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 440 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 441 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 442 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 443 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 444 } 445 446 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 447 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 448 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 449 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 450 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 451 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 452 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 453 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 454 } 455 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 456 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 457 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 458 } 459 m_state.SetError(e_regSetFPU, Read, 0); 460 } 461 else 462 { 463 if (CPUHasAVX() || FORCE_AVX_REGS) 464 { 465 mach_msg_type_number_t count = e_regSetWordSizeAVX; 466 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count)); 467 } 468 else 469 { 470 mach_msg_type_number_t count = e_regSetWordSizeFPR; 471 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count)); 472 } 473 } 474 } 475 return m_state.GetError(e_regSetFPU, Read); 476} 477 478kern_return_t 479DNBArchImplI386::GetEXCState(bool force) 480{ 481 if (force || m_state.GetError(e_regSetEXC, Read)) 482 { 483 mach_msg_type_number_t count = e_regSetWordSizeEXC; 484 m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), 
__i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count)); 485 } 486 return m_state.GetError(e_regSetEXC, Read); 487} 488 489kern_return_t 490DNBArchImplI386::SetGPRState() 491{ 492 m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR)); 493 return m_state.GetError(e_regSetGPR, Write); 494} 495 496kern_return_t 497DNBArchImplI386::SetFPUState() 498{ 499 if (DEBUG_FPU_REGS) 500 { 501 m_state.SetError(e_regSetFPU, Write, 0); 502 return m_state.GetError(e_regSetFPU, Write); 503 } 504 else 505 { 506 if (CPUHasAVX() || FORCE_AVX_REGS) 507 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX)); 508 else 509 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPR)); 510 return m_state.GetError(e_regSetFPU, Write); 511 } 512} 513 514kern_return_t 515DNBArchImplI386::SetEXCState() 516{ 517 m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC)); 518 return m_state.GetError(e_regSetEXC, Write); 519} 520 521kern_return_t 522DNBArchImplI386::GetDBGState(bool force) 523{ 524 if (force || m_state.GetError(e_regSetDBG, Read)) 525 { 526 mach_msg_type_number_t count = e_regSetWordSizeDBG; 527 m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count)); 528 } 529 return m_state.GetError(e_regSetDBG, Read); 530} 531 532kern_return_t 533DNBArchImplI386::SetDBGState() 534{ 535 m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG)); 536 return m_state.GetError(e_regSetDBG, 
Write); 537} 538 539void 540DNBArchImplI386::ThreadWillResume() 541{ 542 // Do we need to step this thread? If so, let the mach thread tell us so. 543 if (m_thread->IsStepping()) 544 { 545 // This is the primary thread, let the arch do anything it needs 546 EnableHardwareSingleStep(true); 547 } 548} 549 550bool 551DNBArchImplI386::ThreadDidStop() 552{ 553 bool success = true; 554 555 m_state.InvalidateAllRegisterStates(); 556 557 // Are we stepping a single instruction? 558 if (GetGPRState(true) == KERN_SUCCESS) 559 { 560 // We are single stepping, was this the primary thread? 561 if (m_thread->IsStepping()) 562 { 563 // This was the primary thread, we need to clear the trace 564 // bit if so. 565 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 566 } 567 else 568 { 569 // The MachThread will automatically restore the suspend count 570 // in ThreadDidStop(), so we don't need to do anything here if 571 // we weren't the primary thread the last time 572 } 573 } 574 return success; 575} 576 577bool 578DNBArchImplI386::NotifyException(MachException::Data& exc) 579{ 580 switch (exc.exc_type) 581 { 582 case EXC_BAD_ACCESS: 583 break; 584 case EXC_BAD_INSTRUCTION: 585 break; 586 case EXC_ARITHMETIC: 587 break; 588 case EXC_EMULATION: 589 break; 590 case EXC_SOFTWARE: 591 break; 592 case EXC_BREAKPOINT: 593 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) 594 { 595 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS); 596 if (pc != INVALID_NUB_ADDRESS && pc > 0) 597 { 598 pc -= 1; 599 // Check for a breakpoint at one byte prior to the current PC value 600 // since the PC will be just past the trap. 601 602 nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc); 603 if (NUB_BREAK_ID_IS_VALID(breakID)) 604 { 605 // Backup the PC for i386 since the trap was taken and the PC 606 // is at the address following the single byte trap instruction. 
607 if (m_state.context.gpr.__eip > 0) 608 { 609 m_state.context.gpr.__eip = pc; 610 // Write the new PC back out 611 SetGPRState (); 612 } 613 } 614 return true; 615 } 616 } 617 break; 618 case EXC_SYSCALL: 619 break; 620 case EXC_MACH_SYSCALL: 621 break; 622 case EXC_RPC_ALERT: 623 break; 624 } 625 return false; 626} 627 628 629// Set the single step bit in the processor status register. 630kern_return_t 631DNBArchImplI386::EnableHardwareSingleStep (bool enable) 632{ 633 if (GetGPRState(false) == KERN_SUCCESS) 634 { 635 const uint32_t trace_bit = 0x100u; 636 if (enable) 637 m_state.context.gpr.__eflags |= trace_bit; 638 else 639 m_state.context.gpr.__eflags &= ~trace_bit; 640 return SetGPRState(); 641 } 642 return m_state.GetError(e_regSetGPR, Read); 643} 644 645 646//---------------------------------------------------------------------- 647// Register information defintions 648//---------------------------------------------------------------------- 649 650 651#define GPR_OFFSET(reg) (offsetof (DNBArchImplI386::GPR, __##reg)) 652#define FPU_OFFSET(reg) (offsetof (DNBArchImplI386::FPU, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.no_avx)) 653#define AVX_OFFSET(reg) (offsetof (DNBArchImplI386::AVX, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.avx)) 654#define EXC_OFFSET(reg) (offsetof (DNBArchImplI386::EXC, __##reg) + offsetof (DNBArchImplI386::Context, exc)) 655 656#define GPR_SIZE(reg) (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg)) 657#define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg)) 658#define FPU_SIZE_MMST(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg)) 659#define FPU_SIZE_XMM(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg)) 660#define FPU_SIZE_YMM(reg) (32) 661#define EXC_SIZE(reg) (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg)) 662 663// This does not accurately identify the location of ymm0...7 in 664// Context.fpu.avx. 
That is because there is a bunch of padding 665// in Context.fpu.avx that we don't need. Offset macros lay out 666// the register state that Debugserver transmits to the debugger 667// -- not to interpret the thread_get_state info. 668#define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n)) 669 670// These macros will auto define the register name, alt name, register size, 671// register offset, encoding, format and native register. This ensures that 672// the register state structures are defined correctly and have the correct 673// sizes and offsets. 674 675// General purpose registers for 64 bit 676const DNBRegisterInfo 677DNBArchImplI386::g_gpr_registers[] = 678{ 679{ e_regSetGPR, gpr_eax, "eax" , NULL , Uint, Hex, GPR_SIZE(eax), GPR_OFFSET(eax) , gcc_eax , dwarf_eax , -1 , gdb_eax }, 680{ e_regSetGPR, gpr_ebx, "ebx" , NULL , Uint, Hex, GPR_SIZE(ebx), GPR_OFFSET(ebx) , gcc_ebx , dwarf_ebx , -1 , gdb_ebx }, 681{ e_regSetGPR, gpr_ecx, "ecx" , NULL , Uint, Hex, GPR_SIZE(ecx), GPR_OFFSET(ecx) , gcc_ecx , dwarf_ecx , -1 , gdb_ecx }, 682{ e_regSetGPR, gpr_edx, "edx" , NULL , Uint, Hex, GPR_SIZE(edx), GPR_OFFSET(edx) , gcc_edx , dwarf_edx , -1 , gdb_edx }, 683{ e_regSetGPR, gpr_edi, "edi" , NULL , Uint, Hex, GPR_SIZE(edi), GPR_OFFSET(edi) , gcc_edi , dwarf_edi , -1 , gdb_edi }, 684{ e_regSetGPR, gpr_esi, "esi" , NULL , Uint, Hex, GPR_SIZE(esi), GPR_OFFSET(esi) , gcc_esi , dwarf_esi , -1 , gdb_esi }, 685{ e_regSetGPR, gpr_ebp, "ebp" , "fp" , Uint, Hex, GPR_SIZE(ebp), GPR_OFFSET(ebp) , gcc_ebp , dwarf_ebp , GENERIC_REGNUM_FP , gdb_ebp }, 686{ e_regSetGPR, gpr_esp, "esp" , "sp" , Uint, Hex, GPR_SIZE(esp), GPR_OFFSET(esp) , gcc_esp , dwarf_esp , GENERIC_REGNUM_SP , gdb_esp }, 687{ e_regSetGPR, gpr_ss, "ss" , NULL , Uint, Hex, GPR_SIZE(ss), GPR_OFFSET(ss) , -1 , -1 , -1 , gdb_ss }, 688{ e_regSetGPR, gpr_eflags, "eflags", "flags" , Uint, Hex, GPR_SIZE(eflags), GPR_OFFSET(eflags) , gcc_eflags, dwarf_eflags , GENERIC_REGNUM_FLAGS , gdb_eflags}, 689{ 
e_regSetGPR, gpr_eip, "eip" , "pc" , Uint, Hex, GPR_SIZE(eip), GPR_OFFSET(eip) , gcc_eip , dwarf_eip , GENERIC_REGNUM_PC , gdb_eip }, 690{ e_regSetGPR, gpr_cs, "cs" , NULL , Uint, Hex, GPR_SIZE(cs), GPR_OFFSET(cs) , -1 , -1 , -1 , gdb_cs }, 691{ e_regSetGPR, gpr_ds, "ds" , NULL , Uint, Hex, GPR_SIZE(ds), GPR_OFFSET(ds) , -1 , -1 , -1 , gdb_ds }, 692{ e_regSetGPR, gpr_es, "es" , NULL , Uint, Hex, GPR_SIZE(es), GPR_OFFSET(es) , -1 , -1 , -1 , gdb_es }, 693{ e_regSetGPR, gpr_fs, "fs" , NULL , Uint, Hex, GPR_SIZE(fs), GPR_OFFSET(fs) , -1 , -1 , -1 , gdb_fs }, 694{ e_regSetGPR, gpr_gs, "gs" , NULL , Uint, Hex, GPR_SIZE(gs), GPR_OFFSET(gs) , -1 , -1 , -1 , gdb_gs } 695}; 696 697 698const DNBRegisterInfo 699DNBArchImplI386::g_fpu_registers_no_avx[] = 700{ 701{ e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , FPU_OFFSET(fcw) , -1, -1, -1, -1 }, 702{ e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , FPU_OFFSET(fsw) , -1, -1, -1, -1 }, 703{ e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , FPU_OFFSET(ftw) , -1, -1, -1, -1 }, 704{ e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , FPU_OFFSET(fop) , -1, -1, -1, -1 }, 705{ e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , FPU_OFFSET(ip) , -1, -1, -1, -1 }, 706{ e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , FPU_OFFSET(cs) , -1, -1, -1, -1 }, 707{ e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , FPU_OFFSET(dp) , -1, -1, -1, -1 }, 708{ e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , FPU_OFFSET(ds) , -1, -1, -1, -1 }, 709{ e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , FPU_OFFSET(mxcsr) , -1, -1, -1, -1 }, 710{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1, -1, -1, -1 }, 711 712{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), 
FPU_OFFSET(stmm0), -1, dwarf_stmm0, -1, gdb_stmm0 }, 713{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), -1, dwarf_stmm1, -1, gdb_stmm1 }, 714{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), -1, dwarf_stmm2, -1, gdb_stmm2 }, 715{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), -1, dwarf_stmm3, -1, gdb_stmm3 }, 716{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), -1, dwarf_stmm4, -1, gdb_stmm4 }, 717{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), -1, dwarf_stmm5, -1, gdb_stmm5 }, 718{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), -1, dwarf_stmm6, -1, gdb_stmm6 }, 719{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), -1, dwarf_stmm7, -1, gdb_stmm7 }, 720 721{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), -1, dwarf_xmm0, -1, gdb_xmm0 }, 722{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), -1, dwarf_xmm1, -1, gdb_xmm1 }, 723{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), -1, dwarf_xmm2, -1, gdb_xmm2 }, 724{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), -1, dwarf_xmm3, -1, gdb_xmm3 }, 725{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), -1, dwarf_xmm4, -1, gdb_xmm4 }, 726{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), -1, dwarf_xmm5, -1, gdb_xmm5 }, 727{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), -1, dwarf_xmm6, -1, gdb_xmm6 }, 
728{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), -1, dwarf_xmm7, -1, gdb_xmm7 } 729}; 730 731const DNBRegisterInfo 732DNBArchImplI386::g_fpu_registers_avx[] = 733{ 734{ e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , AVX_OFFSET(fcw) , -1, -1, -1, -1 }, 735{ e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , AVX_OFFSET(fsw) , -1, -1, -1, -1 }, 736{ e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , AVX_OFFSET(ftw) , -1, -1, -1, -1 }, 737{ e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , AVX_OFFSET(fop) , -1, -1, -1, -1 }, 738{ e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , AVX_OFFSET(ip) , -1, -1, -1, -1 }, 739{ e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , AVX_OFFSET(cs) , -1, -1, -1, -1 }, 740{ e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , AVX_OFFSET(dp) , -1, -1, -1, -1 }, 741{ e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , AVX_OFFSET(ds) , -1, -1, -1, -1 }, 742{ e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , AVX_OFFSET(mxcsr) , -1, -1, -1, -1 }, 743{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1, -1, -1, -1 }, 744 745{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), -1, dwarf_stmm0, -1, gdb_stmm0 }, 746{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), -1, dwarf_stmm1, -1, gdb_stmm1 }, 747{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), -1, dwarf_stmm2, -1, gdb_stmm2 }, 748{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), -1, dwarf_stmm3, -1, gdb_stmm3 }, 749{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, 
FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), -1, dwarf_stmm4, -1, gdb_stmm4 }, 750{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), -1, dwarf_stmm5, -1, gdb_stmm5 }, 751{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), -1, dwarf_stmm6, -1, gdb_stmm6 }, 752{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), -1, dwarf_stmm7, -1, gdb_stmm7 }, 753 754{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), AVX_OFFSET(xmm0), -1, dwarf_xmm0, -1, gdb_xmm0 }, 755{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), AVX_OFFSET(xmm1), -1, dwarf_xmm1, -1, gdb_xmm1 }, 756{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), AVX_OFFSET(xmm2), -1, dwarf_xmm2, -1, gdb_xmm2 }, 757{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), AVX_OFFSET(xmm3), -1, dwarf_xmm3, -1, gdb_xmm3 }, 758{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), AVX_OFFSET(xmm4), -1, dwarf_xmm4, -1, gdb_xmm4 }, 759{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), AVX_OFFSET(xmm5), -1, dwarf_xmm5, -1, gdb_xmm5 }, 760{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), AVX_OFFSET(xmm6), -1, dwarf_xmm6, -1, gdb_xmm6 }, 761{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), AVX_OFFSET(xmm7), -1, dwarf_xmm7, -1, gdb_xmm7 }, 762 763{ e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), -1, dwarf_ymm0, -1, gdb_ymm0 }, 764{ e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), -1, dwarf_ymm1, -1, gdb_ymm1 }, 765{ e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), -1, dwarf_ymm2, -1, gdb_ymm2 }, 
766{ e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), -1, dwarf_ymm3, -1, gdb_ymm3 }, 767{ e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), -1, dwarf_ymm4, -1, gdb_ymm4 }, 768{ e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), -1, dwarf_ymm5, -1, gdb_ymm5 }, 769{ e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), -1, dwarf_ymm6, -1, gdb_ymm6 }, 770{ e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), -1, dwarf_ymm7, -1, gdb_ymm7 }, 771}; 772 773const DNBRegisterInfo 774DNBArchImplI386::g_exc_registers[] = 775{ 776{ e_regSetEXC, exc_trapno, "trapno" , NULL, Uint, Hex, EXC_SIZE (trapno) , EXC_OFFSET (trapno) , -1, -1, -1, -1 }, 777{ e_regSetEXC, exc_err, "err" , NULL, Uint, Hex, EXC_SIZE (err) , EXC_OFFSET (err) , -1, -1, -1, -1 }, 778{ e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr) , -1, -1, -1, -1 } 779}; 780 781// Number of registers in each register set 782const size_t DNBArchImplI386::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo); 783const size_t DNBArchImplI386::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo); 784const size_t DNBArchImplI386::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo); 785const size_t DNBArchImplI386::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo); 786const size_t DNBArchImplI386::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers; 787const size_t DNBArchImplI386::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers; 788 789//---------------------------------------------------------------------- 790// Register set definitions. 
// The first definitions at register set index
// of zero is for all registers, followed by other registers sets. The
// register information for the all register set need not be filled in.
//----------------------------------------------------------------------
// Register sets reported when the CPU does not support AVX.
const DNBRegisterSetInfo
DNBArchImplI386::g_reg_sets_no_avx[] =
{
    { "i386 Registers",             NULL,                   k_num_all_registers_no_avx },
    { "General Purpose Registers",  g_gpr_registers,        k_num_gpr_registers },
    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
    { "Exception State Registers",  g_exc_registers,        k_num_exc_registers }
};

// Register sets reported when the CPU supports AVX (adds ymm0-7).
const DNBRegisterSetInfo
DNBArchImplI386::g_reg_sets_avx[] =
{
    { "i386 Registers",             NULL,                   k_num_all_registers_avx },
    { "General Purpose Registers",  g_gpr_registers,        k_num_gpr_registers },
    { "Floating Point Registers",   g_fpu_registers_avx,    k_num_fpu_registers_avx },
    { "Exception State Registers",  g_exc_registers,        k_num_exc_registers }
};

// Total number of register sets for this architecture
// (both tables have the same number of entries, so either works here).
const size_t DNBArchImplI386::k_num_register_sets = sizeof(g_reg_sets_no_avx)/sizeof(DNBRegisterSetInfo);

// Factory used by the arch plug-in registration below.
DNBArchProtocol *
DNBArchImplI386::Create (MachThread *thread)
{
    return new DNBArchImplI386 (thread);
}

// Return the software breakpoint opcode (0xCC, int3) for a 1-byte
// breakpoint, or NULL for any other size.
const uint8_t * const
DNBArchImplI386::SoftwareBreakpointOpcode (nub_size_t byte_size)
{
    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
    if (byte_size == 1)
        return g_breakpoint_opcode;
    return NULL;
}

// Return the register set table (AVX or non-AVX flavor) and store the
// number of sets in *num_reg_sets.
const DNBRegisterSetInfo *
DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets)
{
    *num_reg_sets = k_num_register_sets;
    if (CPUHasAVX() || FORCE_AVX_REGS)
        return g_reg_sets_avx;
    else
        return g_reg_sets_no_avx;
}


// Register this i386 implementation with the main DNBArchProtocol class.
// NOTE: the initializer list continues past the end of this visible chunk.
void
DNBArchImplI386::Initialize()
{
    DNBArchPluginInfo arch_plugin_info =
    {
        CPU_TYPE_I386,
        DNBArchImplI386::Create,
        DNBArchImplI386::GetRegisterSetInfo,
DNBArchImplI386::SoftwareBreakpointOpcode 850 }; 851 852 // Register this arch plug-in with the main protocol class 853 DNBArchProtocol::RegisterArchPlugin (arch_plugin_info); 854} 855 856bool 857DNBArchImplI386::GetRegisterValue(int set, int reg, DNBRegisterValue *value) 858{ 859 if (set == REGISTER_SET_GENERIC) 860 { 861 switch (reg) 862 { 863 case GENERIC_REGNUM_PC: // Program Counter 864 set = e_regSetGPR; 865 reg = gpr_eip; 866 break; 867 868 case GENERIC_REGNUM_SP: // Stack Pointer 869 set = e_regSetGPR; 870 reg = gpr_esp; 871 break; 872 873 case GENERIC_REGNUM_FP: // Frame Pointer 874 set = e_regSetGPR; 875 reg = gpr_ebp; 876 break; 877 878 case GENERIC_REGNUM_FLAGS: // Processor flags register 879 set = e_regSetGPR; 880 reg = gpr_eflags; 881 break; 882 883 case GENERIC_REGNUM_RA: // Return Address 884 default: 885 return false; 886 } 887 } 888 889 if (GetRegisterState(set, false) != KERN_SUCCESS) 890 return false; 891 892 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 893 if (regInfo) 894 { 895 value->info = *regInfo; 896 switch (set) 897 { 898 case e_regSetGPR: 899 if (reg < k_num_gpr_registers) 900 { 901 value->value.uint32 = ((uint32_t*)(&m_state.context.gpr))[reg]; 902 return true; 903 } 904 break; 905 906 case e_regSetFPU: 907 if (CPUHasAVX() || FORCE_AVX_REGS) 908 { 909 switch (reg) 910 { 911 case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)); return true; 912 case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)); return true; 913 case fpu_ftw: value->value.uint8 = m_state.context.fpu.avx.__fpu_ftw; return true; 914 case fpu_fop: value->value.uint16 = m_state.context.fpu.avx.__fpu_fop; return true; 915 case fpu_ip: value->value.uint32 = m_state.context.fpu.avx.__fpu_ip; return true; 916 case fpu_cs: value->value.uint16 = m_state.context.fpu.avx.__fpu_cs; return true; 917 case fpu_dp: value->value.uint32 = m_state.context.fpu.avx.__fpu_dp; return true; 918 
case fpu_ds: value->value.uint16 = m_state.context.fpu.avx.__fpu_ds; return true; 919 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr; return true; 920 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask; return true; 921 922 case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, 10); return true; 923 case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, 10); return true; 924 case fpu_stmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, 10); return true; 925 case fpu_stmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, 10); return true; 926 case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, 10); return true; 927 case fpu_stmm5: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 10); return true; 928 case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, 10); return true; 929 case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, 10); return true; 930 931 case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, 16); return true; 932 case fpu_xmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, 16); return true; 933 case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, 16); return true; 934 case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, 16); return true; 935 case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, 16); return true; 936 case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, 16); return true; 937 case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, 16); return true; 938 case fpu_xmm7: 
memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, 16); return true; 939 940#define MEMCPY_YMM(n) \ 941 memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, 16); \ 942 memcpy((&value->value.uint8) + 16, m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16); 943 case fpu_ymm0: MEMCPY_YMM(0); return true; 944 case fpu_ymm1: MEMCPY_YMM(1); return true; 945 case fpu_ymm2: MEMCPY_YMM(2); return true; 946 case fpu_ymm3: MEMCPY_YMM(3); return true; 947 case fpu_ymm4: MEMCPY_YMM(4); return true; 948 case fpu_ymm5: MEMCPY_YMM(5); return true; 949 case fpu_ymm6: MEMCPY_YMM(6); return true; 950 case fpu_ymm7: MEMCPY_YMM(7); return true; 951#undef MEMCPY_YMM 952 } 953 } 954 else 955 { 956 switch (reg) 957 { 958 case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); return true; 959 case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); return true; 960 case fpu_ftw: value->value.uint8 = m_state.context.fpu.no_avx.__fpu_ftw; return true; 961 case fpu_fop: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; return true; 962 case fpu_ip: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; return true; 963 case fpu_cs: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; return true; 964 case fpu_dp: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; return true; 965 case fpu_ds: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; return true; 966 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; return true; 967 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; return true; 968 969 case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10); return true; 970 case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10); return true; 971 case fpu_stmm2: memcpy(&value->value.uint8, 
m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10); return true; 972 case fpu_stmm3: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10); return true; 973 case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10); return true; 974 case fpu_stmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10); return true; 975 case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10); return true; 976 case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10); return true; 977 978 case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16); return true; 979 case fpu_xmm1: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16); return true; 980 case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16); return true; 981 case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16); return true; 982 case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16); return true; 983 case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16); return true; 984 case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16); return true; 985 case fpu_xmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16); return true; 986 } 987 } 988 break; 989 990 case e_regSetEXC: 991 if (reg < k_num_exc_registers) 992 { 993 value->value.uint32 = (&m_state.context.exc.__trapno)[reg]; 994 return true; 995 } 996 break; 997 } 998 } 999 return false; 1000} 1001 1002 1003bool 1004DNBArchImplI386::SetRegisterValue(int set, int reg, const DNBRegisterValue *value) 1005{ 1006 if (set == REGISTER_SET_GENERIC) 1007 { 1008 switch (reg) 1009 { 1010 case 
GENERIC_REGNUM_PC: // Program Counter 1011 set = e_regSetGPR; 1012 reg = gpr_eip; 1013 break; 1014 1015 case GENERIC_REGNUM_SP: // Stack Pointer 1016 set = e_regSetGPR; 1017 reg = gpr_esp; 1018 break; 1019 1020 case GENERIC_REGNUM_FP: // Frame Pointer 1021 set = e_regSetGPR; 1022 reg = gpr_ebp; 1023 break; 1024 1025 case GENERIC_REGNUM_FLAGS: // Processor flags register 1026 set = e_regSetGPR; 1027 reg = gpr_eflags; 1028 break; 1029 1030 case GENERIC_REGNUM_RA: // Return Address 1031 default: 1032 return false; 1033 } 1034 } 1035 1036 if (GetRegisterState(set, false) != KERN_SUCCESS) 1037 return false; 1038 1039 bool success = false; 1040 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1041 if (regInfo) 1042 { 1043 switch (set) 1044 { 1045 case e_regSetGPR: 1046 if (reg < k_num_gpr_registers) 1047 { 1048 ((uint32_t*)(&m_state.context.gpr))[reg] = value->value.uint32; 1049 success = true; 1050 } 1051 break; 1052 1053 case e_regSetFPU: 1054 if (CPUHasAVX() || FORCE_AVX_REGS) 1055 { 1056 switch (reg) 1057 { 1058 case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1059 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1060 case fpu_ftw: m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; success = true; break; 1061 case fpu_fop: m_state.context.fpu.avx.__fpu_fop = value->value.uint16; success = true; break; 1062 case fpu_ip: m_state.context.fpu.avx.__fpu_ip = value->value.uint32; success = true; break; 1063 case fpu_cs: m_state.context.fpu.avx.__fpu_cs = value->value.uint16; success = true; break; 1064 case fpu_dp: m_state.context.fpu.avx.__fpu_dp = value->value.uint32; success = true; break; 1065 case fpu_ds: m_state.context.fpu.avx.__fpu_ds = value->value.uint16; success = true; break; 1066 case fpu_mxcsr: m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1067 case fpu_mxcsrmask: 
m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; success = true; break; 1068 1069 case fpu_stmm0: memcpy (m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break; 1070 case fpu_stmm1: memcpy (m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = true; break; 1071 case fpu_stmm2: memcpy (m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break; 1072 case fpu_stmm3: memcpy (m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break; 1073 case fpu_stmm4: memcpy (m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break; 1074 case fpu_stmm5: memcpy (m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10); success = true; break; 1075 case fpu_stmm6: memcpy (m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break; 1076 case fpu_stmm7: memcpy (m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break; 1077 1078 case fpu_xmm0: memcpy(m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break; 1079 case fpu_xmm1: memcpy(m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break; 1080 case fpu_xmm2: memcpy(m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break; 1081 case fpu_xmm3: memcpy(m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break; 1082 case fpu_xmm4: memcpy(m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break; 1083 case fpu_xmm5: memcpy(m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break; 1084 case fpu_xmm6: memcpy(m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break; 1085 case fpu_xmm7: 
memcpy(m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; break; 1086 1087#define MEMCPY_YMM(n) \ 1088 memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, 16); \ 1089 memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, (&value->value.uint8) + 16, 16); 1090 case fpu_ymm0: MEMCPY_YMM(0); return true; 1091 case fpu_ymm1: MEMCPY_YMM(1); return true; 1092 case fpu_ymm2: MEMCPY_YMM(2); return true; 1093 case fpu_ymm3: MEMCPY_YMM(3); return true; 1094 case fpu_ymm4: MEMCPY_YMM(4); return true; 1095 case fpu_ymm5: MEMCPY_YMM(5); return true; 1096 case fpu_ymm6: MEMCPY_YMM(6); return true; 1097 case fpu_ymm7: MEMCPY_YMM(7); return true; 1098#undef MEMCPY_YMM 1099 } 1100 } 1101 else 1102 { 1103 switch (reg) 1104 { 1105 case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1106 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1107 case fpu_ftw: m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8; success = true; break; 1108 case fpu_fop: m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; success = true; break; 1109 case fpu_ip: m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; success = true; break; 1110 case fpu_cs: m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; success = true; break; 1111 case fpu_dp: m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; success = true; break; 1112 case fpu_ds: m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; success = true; break; 1113 case fpu_mxcsr: m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1114 case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; success = true; break; 1115 1116 case fpu_stmm0: memcpy (m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break; 1117 case fpu_stmm1: 
memcpy (m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = true; break; 1118 case fpu_stmm2: memcpy (m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break; 1119 case fpu_stmm3: memcpy (m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break; 1120 case fpu_stmm4: memcpy (m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break; 1121 case fpu_stmm5: memcpy (m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10); success = true; break; 1122 case fpu_stmm6: memcpy (m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break; 1123 case fpu_stmm7: memcpy (m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break; 1124 1125 case fpu_xmm0: memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break; 1126 case fpu_xmm1: memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break; 1127 case fpu_xmm2: memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break; 1128 case fpu_xmm3: memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break; 1129 case fpu_xmm4: memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break; 1130 case fpu_xmm5: memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break; 1131 case fpu_xmm6: memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break; 1132 case fpu_xmm7: memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; break; 1133 } 1134 } 1135 break; 1136 1137 case e_regSetEXC: 1138 if (reg < k_num_exc_registers) 1139 { 1140 
(&m_state.context.exc.__trapno)[reg] = value->value.uint32; 1141 success = true; 1142 } 1143 break; 1144 } 1145 } 1146 1147 if (success) 1148 return SetRegisterState(set) == KERN_SUCCESS; 1149 return false; 1150} 1151 1152 1153nub_size_t 1154DNBArchImplI386::GetRegisterContext (void *buf, nub_size_t buf_len) 1155{ 1156 nub_size_t size = sizeof (m_state.context); 1157 1158 if (buf && buf_len) 1159 { 1160 if (size > buf_len) 1161 size = buf_len; 1162 1163 bool force = false; 1164 if (GetGPRState(force) | GetFPUState(force) | GetEXCState(force)) 1165 return 0; 1166 ::memcpy (buf, &m_state.context, size); 1167 } 1168 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1169 // Return the size of the register context even if NULL was passed in 1170 return size; 1171} 1172 1173nub_size_t 1174DNBArchImplI386::SetRegisterContext (const void *buf, nub_size_t buf_len) 1175{ 1176 nub_size_t size = sizeof (m_state.context); 1177 if (buf == NULL || buf_len == 0) 1178 size = 0; 1179 1180 if (size) 1181 { 1182 if (size > buf_len) 1183 size = buf_len; 1184 1185 ::memcpy (&m_state.context, buf, size); 1186 SetGPRState(); 1187 SetFPUState(); 1188 SetEXCState(); 1189 } 1190 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1191 return size; 1192} 1193 1194 1195 1196kern_return_t 1197DNBArchImplI386::GetRegisterState(int set, bool force) 1198{ 1199 switch (set) 1200 { 1201 case e_regSetALL: return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 1202 case e_regSetGPR: return GetGPRState(force); 1203 case e_regSetFPU: return GetFPUState(force); 1204 case e_regSetEXC: return GetEXCState(force); 1205 default: break; 1206 } 1207 return KERN_INVALID_ARGUMENT; 1208} 1209 1210kern_return_t 1211DNBArchImplI386::SetRegisterState(int set) 1212{ 1213 // Make sure we have a valid context to set. 
1214 if (RegisterSetStateIsValid(set)) 1215 { 1216 switch (set) 1217 { 1218 case e_regSetALL: return SetGPRState() | SetFPUState() | SetEXCState(); 1219 case e_regSetGPR: return SetGPRState(); 1220 case e_regSetFPU: return SetFPUState(); 1221 case e_regSetEXC: return SetEXCState(); 1222 default: break; 1223 } 1224 } 1225 return KERN_INVALID_ARGUMENT; 1226} 1227 1228bool 1229DNBArchImplI386::RegisterSetStateIsValid (int set) const 1230{ 1231 return m_state.RegsAreValid(set); 1232} 1233 1234#endif // #if defined (__i386__) 1235