DNBArchImplI386.cpp revision eb0eae254e4e2d737b9c137296197a70a1ba7f68
1//===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// Created by Greg Clayton on 6/25/07. 11// 12//===----------------------------------------------------------------------===// 13 14#if defined (__i386__) || defined (__x86_64__) 15 16#include <sys/cdefs.h> 17 18#include "MacOSX/i386/DNBArchImplI386.h" 19#include "DNBLog.h" 20#include "MachThread.h" 21#include "MachProcess.h" 22 23#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG) 24enum debugState { 25 debugStateUnknown, 26 debugStateOff, 27 debugStateOn 28}; 29 30static debugState sFPUDebugState = debugStateUnknown; 31static debugState sAVXForceState = debugStateUnknown; 32 33static bool DebugFPURegs () 34{ 35 if (sFPUDebugState == debugStateUnknown) 36 { 37 if (getenv("DNB_DEBUG_FPU_REGS")) 38 sFPUDebugState = debugStateOn; 39 else 40 sFPUDebugState = debugStateOff; 41 } 42 43 return (sFPUDebugState == debugStateOn); 44} 45 46static bool ForceAVXRegs () 47{ 48 if (sFPUDebugState == debugStateUnknown) 49 { 50 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS")) 51 sAVXForceState = debugStateOn; 52 else 53 sAVXForceState = debugStateOff; 54 } 55 56 return (sAVXForceState == debugStateOn); 57} 58 59#define DEBUG_FPU_REGS (DebugFPURegs()) 60#define FORCE_AVX_REGS (ForceAVXRegs()) 61#else 62#define DEBUG_FPU_REGS (0) 63#define FORCE_AVX_REGS (0) 64#endif 65 66enum 67{ 68 gpr_eax = 0, 69 gpr_ebx = 1, 70 gpr_ecx = 2, 71 gpr_edx = 3, 72 gpr_edi = 4, 73 gpr_esi = 5, 74 gpr_ebp = 6, 75 gpr_esp = 7, 76 gpr_ss = 8, 77 gpr_eflags = 9, 78 gpr_eip = 10, 79 gpr_cs = 11, 80 gpr_ds = 12, 81 gpr_es = 13, 82 gpr_fs = 14, 83 gpr_gs = 15, 84 k_num_gpr_regs 85}; 86 87enum { 88 fpu_fcw, 89 fpu_fsw, 90 fpu_ftw, 91 fpu_fop, 92 fpu_ip, 93 
fpu_cs, 94 fpu_dp, 95 fpu_ds, 96 fpu_mxcsr, 97 fpu_mxcsrmask, 98 fpu_stmm0, 99 fpu_stmm1, 100 fpu_stmm2, 101 fpu_stmm3, 102 fpu_stmm4, 103 fpu_stmm5, 104 fpu_stmm6, 105 fpu_stmm7, 106 fpu_xmm0, 107 fpu_xmm1, 108 fpu_xmm2, 109 fpu_xmm3, 110 fpu_xmm4, 111 fpu_xmm5, 112 fpu_xmm6, 113 fpu_xmm7, 114 fpu_ymm0, 115 fpu_ymm1, 116 fpu_ymm2, 117 fpu_ymm3, 118 fpu_ymm4, 119 fpu_ymm5, 120 fpu_ymm6, 121 fpu_ymm7, 122 k_num_fpu_regs, 123 124 // Aliases 125 fpu_fctrl = fpu_fcw, 126 fpu_fstat = fpu_fsw, 127 fpu_ftag = fpu_ftw, 128 fpu_fiseg = fpu_cs, 129 fpu_fioff = fpu_ip, 130 fpu_foseg = fpu_ds, 131 fpu_fooff = fpu_dp 132}; 133 134enum { 135 exc_trapno, 136 exc_err, 137 exc_faultvaddr, 138 k_num_exc_regs, 139}; 140 141 142enum 143{ 144 gcc_eax = 0, 145 gcc_ecx, 146 gcc_edx, 147 gcc_ebx, 148 gcc_ebp, 149 gcc_esp, 150 gcc_esi, 151 gcc_edi, 152 gcc_eip, 153 gcc_eflags 154}; 155 156enum 157{ 158 dwarf_eax = 0, 159 dwarf_ecx, 160 dwarf_edx, 161 dwarf_ebx, 162 dwarf_esp, 163 dwarf_ebp, 164 dwarf_esi, 165 dwarf_edi, 166 dwarf_eip, 167 dwarf_eflags, 168 dwarf_stmm0 = 11, 169 dwarf_stmm1, 170 dwarf_stmm2, 171 dwarf_stmm3, 172 dwarf_stmm4, 173 dwarf_stmm5, 174 dwarf_stmm6, 175 dwarf_stmm7, 176 dwarf_xmm0 = 21, 177 dwarf_xmm1, 178 dwarf_xmm2, 179 dwarf_xmm3, 180 dwarf_xmm4, 181 dwarf_xmm5, 182 dwarf_xmm6, 183 dwarf_xmm7, 184 dwarf_ymm0 = dwarf_xmm0, 185 dwarf_ymm1 = dwarf_xmm1, 186 dwarf_ymm2 = dwarf_xmm2, 187 dwarf_ymm3 = dwarf_xmm3, 188 dwarf_ymm4 = dwarf_xmm4, 189 dwarf_ymm5 = dwarf_xmm5, 190 dwarf_ymm6 = dwarf_xmm6, 191 dwarf_ymm7 = dwarf_xmm7, 192}; 193 194enum 195{ 196 gdb_eax = 0, 197 gdb_ecx = 1, 198 gdb_edx = 2, 199 gdb_ebx = 3, 200 gdb_esp = 4, 201 gdb_ebp = 5, 202 gdb_esi = 6, 203 gdb_edi = 7, 204 gdb_eip = 8, 205 gdb_eflags = 9, 206 gdb_cs = 10, 207 gdb_ss = 11, 208 gdb_ds = 12, 209 gdb_es = 13, 210 gdb_fs = 14, 211 gdb_gs = 15, 212 gdb_stmm0 = 16, 213 gdb_stmm1 = 17, 214 gdb_stmm2 = 18, 215 gdb_stmm3 = 19, 216 gdb_stmm4 = 20, 217 gdb_stmm5 = 21, 218 gdb_stmm6 = 22, 219 
gdb_stmm7 = 23, 220 gdb_fctrl = 24, gdb_fcw = gdb_fctrl, 221 gdb_fstat = 25, gdb_fsw = gdb_fstat, 222 gdb_ftag = 26, gdb_ftw = gdb_ftag, 223 gdb_fiseg = 27, gdb_fpu_cs = gdb_fiseg, 224 gdb_fioff = 28, gdb_ip = gdb_fioff, 225 gdb_foseg = 29, gdb_fpu_ds = gdb_foseg, 226 gdb_fooff = 30, gdb_dp = gdb_fooff, 227 gdb_fop = 31, 228 gdb_xmm0 = 32, 229 gdb_xmm1 = 33, 230 gdb_xmm2 = 34, 231 gdb_xmm3 = 35, 232 gdb_xmm4 = 36, 233 gdb_xmm5 = 37, 234 gdb_xmm6 = 38, 235 gdb_xmm7 = 39, 236 gdb_mxcsr = 40, 237 gdb_mm0 = 41, 238 gdb_mm1 = 42, 239 gdb_mm2 = 43, 240 gdb_mm3 = 44, 241 gdb_mm4 = 45, 242 gdb_mm5 = 46, 243 gdb_mm6 = 47, 244 gdb_mm7 = 48, 245 gdb_ymm0 = gdb_xmm0, 246 gdb_ymm1 = gdb_xmm1, 247 gdb_ymm2 = gdb_xmm2, 248 gdb_ymm3 = gdb_xmm3, 249 gdb_ymm4 = gdb_xmm4, 250 gdb_ymm5 = gdb_xmm5, 251 gdb_ymm6 = gdb_xmm6, 252 gdb_ymm7 = gdb_xmm7 253}; 254 255enum DNBArchImplI386::AVXPresence DNBArchImplI386::s_has_avx = DNBArchImplI386::kAVXUnknown; 256 257uint64_t 258DNBArchImplI386::GetPC(uint64_t failValue) 259{ 260 // Get program counter 261 if (GetGPRState(false) == KERN_SUCCESS) 262 return m_state.context.gpr.__eip; 263 return failValue; 264} 265 266kern_return_t 267DNBArchImplI386::SetPC(uint64_t value) 268{ 269 // Get program counter 270 kern_return_t err = GetGPRState(false); 271 if (err == KERN_SUCCESS) 272 { 273 m_state.context.gpr.__eip = value; 274 err = SetGPRState(); 275 } 276 return err == KERN_SUCCESS; 277} 278 279uint64_t 280DNBArchImplI386::GetSP(uint64_t failValue) 281{ 282 // Get stack pointer 283 if (GetGPRState(false) == KERN_SUCCESS) 284 return m_state.context.gpr.__esp; 285 return failValue; 286} 287 288// Uncomment the value below to verify the values in the debugger. 
289//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 290//#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg 291 292kern_return_t 293DNBArchImplI386::GetGPRState(bool force) 294{ 295 if (force || m_state.GetError(e_regSetGPR, Read)) 296 { 297#if DEBUG_GPR_VALUES 298 SET_GPR(eax); 299 SET_GPR(ebx); 300 SET_GPR(ecx); 301 SET_GPR(edx); 302 SET_GPR(edi); 303 SET_GPR(esi); 304 SET_GPR(ebp); 305 SET_GPR(esp); 306 SET_GPR(ss); 307 SET_GPR(eflags); 308 SET_GPR(eip); 309 SET_GPR(cs); 310 SET_GPR(ds); 311 SET_GPR(es); 312 SET_GPR(fs); 313 SET_GPR(gs); 314 m_state.SetError(e_regSetGPR, Read, 0); 315#else 316 mach_msg_type_number_t count = e_regSetWordSizeGPR; 317 m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count)); 318#endif 319 } 320 return m_state.GetError(e_regSetGPR, Read); 321} 322 323// Uncomment the value below to verify the values in the debugger. 324//#define DEBUG_FPU_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 325 326kern_return_t 327DNBArchImplI386::GetFPUState(bool force) 328{ 329 if (force || m_state.GetError(e_regSetFPU, Read)) 330 { 331 if (DEBUG_FPU_REGS) 332 { 333 if (CPUHasAVX() || FORCE_AVX_REGS) 334 { 335 m_state.context.fpu.avx.__fpu_reserved[0] = -1; 336 m_state.context.fpu.avx.__fpu_reserved[1] = -1; 337 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234; 338 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678; 339 m_state.context.fpu.avx.__fpu_ftw = 1; 340 m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX; 341 m_state.context.fpu.avx.__fpu_fop = 2; 342 m_state.context.fpu.avx.__fpu_ip = 3; 343 m_state.context.fpu.avx.__fpu_cs = 4; 344 m_state.context.fpu.avx.__fpu_rsrv2 = 5; 345 m_state.context.fpu.avx.__fpu_dp = 6; 346 m_state.context.fpu.avx.__fpu_ds = 7; 347 m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX; 348 m_state.context.fpu.avx.__fpu_mxcsr = 8; 349 m_state.context.fpu.avx.__fpu_mxcsrmask = 9; 350 
int i; 351 for (i=0; i<16; ++i) 352 { 353 if (i<10) 354 { 355 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 356 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 357 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 358 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 359 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 360 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 361 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 362 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 363 } 364 else 365 { 366 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 367 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 368 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 369 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 370 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 371 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 372 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 373 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 374 } 375 376 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0'; 377 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1'; 378 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2'; 379 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3'; 380 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4'; 381 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5'; 382 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6'; 383 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7'; 384 } 385 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 386 m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN; 387 m_state.context.fpu.avx.__fpu_reserved1 = -1; 388 for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i) 389 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 390 391 for (i = 0; i < 16; ++i) 392 { 393 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0'; 394 
m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1'; 395 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2'; 396 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3'; 397 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4'; 398 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5'; 399 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6'; 400 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7'; 401 } 402 } 403 else 404 { 405 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 406 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 407 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 408 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 409 m_state.context.fpu.no_avx.__fpu_ftw = 1; 410 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 411 m_state.context.fpu.no_avx.__fpu_fop = 2; 412 m_state.context.fpu.no_avx.__fpu_ip = 3; 413 m_state.context.fpu.no_avx.__fpu_cs = 4; 414 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 415 m_state.context.fpu.no_avx.__fpu_dp = 6; 416 m_state.context.fpu.no_avx.__fpu_ds = 7; 417 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 418 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 419 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 420 int i; 421 for (i=0; i<16; ++i) 422 { 423 if (i<10) 424 { 425 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 426 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 427 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 428 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 429 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 430 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 431 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 432 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 433 } 434 else 435 { 436 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 437 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 438 
m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 439 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 440 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 441 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 442 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 443 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 444 } 445 446 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 447 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 448 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 449 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 450 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 451 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 452 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 453 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 454 } 455 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 456 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 457 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 458 } 459 m_state.SetError(e_regSetFPU, Read, 0); 460 } 461 else 462 { 463 if (CPUHasAVX() || FORCE_AVX_REGS) 464 { 465 mach_msg_type_number_t count = e_regSetWordSizeAVX; 466 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count)); 467 } 468 else 469 { 470 mach_msg_type_number_t count = e_regSetWordSizeFPR; 471 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count)); 472 } 473 } 474 } 475 return m_state.GetError(e_regSetFPU, Read); 476} 477 478kern_return_t 479DNBArchImplI386::GetEXCState(bool force) 480{ 481 if (force || m_state.GetError(e_regSetEXC, Read)) 482 { 483 mach_msg_type_number_t count = e_regSetWordSizeEXC; 484 m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), 
__i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count)); 485 } 486 return m_state.GetError(e_regSetEXC, Read); 487} 488 489kern_return_t 490DNBArchImplI386::SetGPRState() 491{ 492 m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR)); 493 return m_state.GetError(e_regSetGPR, Write); 494} 495 496kern_return_t 497DNBArchImplI386::SetFPUState() 498{ 499 if (DEBUG_FPU_REGS) 500 { 501 m_state.SetError(e_regSetFPU, Write, 0); 502 return m_state.GetError(e_regSetFPU, Write); 503 } 504 else 505 { 506 if (CPUHasAVX() || FORCE_AVX_REGS) 507 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX)); 508 else 509 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPR)); 510 return m_state.GetError(e_regSetFPU, Write); 511 } 512} 513 514kern_return_t 515DNBArchImplI386::SetEXCState() 516{ 517 m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC)); 518 return m_state.GetError(e_regSetEXC, Write); 519} 520 521kern_return_t 522DNBArchImplI386::GetDBGState(bool force) 523{ 524 if (force || m_state.GetError(e_regSetDBG, Read)) 525 { 526 mach_msg_type_number_t count = e_regSetWordSizeDBG; 527 m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count)); 528 } 529 return m_state.GetError(e_regSetDBG, Read); 530} 531 532kern_return_t 533DNBArchImplI386::SetDBGState() 534{ 535 m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG)); 536 return m_state.GetError(e_regSetDBG, 
Write); 537} 538 539void 540DNBArchImplI386::ThreadWillResume() 541{ 542 // Do we need to step this thread? If so, let the mach thread tell us so. 543 if (m_thread->IsStepping()) 544 { 545 // This is the primary thread, let the arch do anything it needs 546 EnableHardwareSingleStep(true); 547 } 548} 549 550bool 551DNBArchImplI386::ThreadDidStop() 552{ 553 bool success = true; 554 555 m_state.InvalidateAllRegisterStates(); 556 557 // Are we stepping a single instruction? 558 if (GetGPRState(true) == KERN_SUCCESS) 559 { 560 // We are single stepping, was this the primary thread? 561 if (m_thread->IsStepping()) 562 { 563 // This was the primary thread, we need to clear the trace 564 // bit if so. 565 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 566 } 567 else 568 { 569 // The MachThread will automatically restore the suspend count 570 // in ThreadDidStop(), so we don't need to do anything here if 571 // we weren't the primary thread the last time 572 } 573 } 574 return success; 575} 576 577bool 578DNBArchImplI386::NotifyException(MachException::Data& exc) 579{ 580 switch (exc.exc_type) 581 { 582 case EXC_BAD_ACCESS: 583 break; 584 case EXC_BAD_INSTRUCTION: 585 break; 586 case EXC_ARITHMETIC: 587 break; 588 case EXC_EMULATION: 589 break; 590 case EXC_SOFTWARE: 591 break; 592 case EXC_BREAKPOINT: 593 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) 594 { 595 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS); 596 if (pc != INVALID_NUB_ADDRESS && pc > 0) 597 { 598 pc -= 1; 599 // Check for a breakpoint at one byte prior to the current PC value 600 // since the PC will be just past the trap. 601 602 nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc); 603 if (NUB_BREAK_ID_IS_VALID(breakID)) 604 { 605 // Backup the PC for i386 since the trap was taken and the PC 606 // is at the address following the single byte trap instruction. 
607 if (m_state.context.gpr.__eip > 0) 608 { 609 m_state.context.gpr.__eip = pc; 610 // Write the new PC back out 611 SetGPRState (); 612 } 613 } 614 return true; 615 } 616 } 617 break; 618 case EXC_SYSCALL: 619 break; 620 case EXC_MACH_SYSCALL: 621 break; 622 case EXC_RPC_ALERT: 623 break; 624 } 625 return false; 626} 627 628#ifndef DR_FIRSTADDR 629#define DR_FIRSTADDR 0 630#endif 631 632#ifndef DR_LASTADDR 633#define DR_LASTADDR 3 634#endif 635 636#ifndef DR_STATUS 637#define DR_STATUS 6 638#endif 639 640#ifndef DR_CONTROL 641#define DR_CONTROL 7 642#endif 643 644uint32_t 645DNBArchImplI386::NumSupportedHardwareWatchpoints() 646{ 647 return DR_LASTADDR - DR_FIRSTADDR + 1; 648} 649 650uint32_t 651DNBArchImplI386::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write) 652{ 653 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(addr = %8.8p, size = %u, read = %u, write = %u)", addr, size, read, write); 654 655 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 656 657 // Can't watch zero bytes 658 if (size == 0) 659 return INVALID_NUB_HW_INDEX; 660 661 // We must watch for either read or write 662 if (read == false && write == false) 663 return INVALID_NUB_HW_INDEX; 664 665 // 666 // FIXME: Add implmentation. 
667 // 668 669 // Read the debug state 670 kern_return_t kret = GetDBGState(false); 671 672 if (kret == KERN_SUCCESS) 673 { 674 // Check to make sure we have the needed hardware support 675 uint32_t i = 0; 676 677 DBG debug_state = m_state.context.dbg; 678 for (i=0; i<num_hw_watchpoints; ++i) 679 { 680 uint64_t dr_val = 0; 681 switch (i) { 682 case 0: 683 dr_val = debug_state.__dr0; break; 684 case 1: 685 dr_val = debug_state.__dr1; break; 686 case 2: 687 dr_val = debug_state.__dr2; break; 688 case 3: 689 dr_val = debug_state.__dr3; break; 690 default: 691 break; 692 } 693 if (dr_val != 0) 694 break; // We found an available hw breakpoint slot (in i) 695 } 696 697 // See if we found an available hw breakpoint slot above 698 if (i < num_hw_watchpoints) 699 { 700 kret = SetDBGState(); 701 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImpI386::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret); 702 703 if (kret == KERN_SUCCESS) 704 return i; 705 } 706 else 707 { 708 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImpl386::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints); 709 } 710 } 711 return INVALID_NUB_HW_INDEX; 712} 713 714bool 715DNBArchImplI386::DisableHardwareWatchpoint (uint32_t hw_index) 716{ 717 kern_return_t kret = GetDBGState(false); 718 719 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 720 if (kret == KERN_SUCCESS) 721 { 722 if (hw_index < num_hw_points) 723 { 724 // 725 // FIXEME: Add implementation. 726 // 727 728 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::DisableHardwareWatchpoint( %u )", 729 hw_index); 730 731 kret = SetDBGState(); 732 733 if (kret == KERN_SUCCESS) 734 return true; 735 } 736 } 737 return false; 738} 739 740// Set the single step bit in the processor status register. 
// Enable or disable hardware single stepping by toggling the x86 trap flag
// (TF, bit 8 = 0x100) in EFLAGS, then writing the GPR state back to the
// thread. Returns KERN_SUCCESS on success, or the GPR read error.
kern_return_t
DNBArchImplI386::EnableHardwareSingleStep (bool enable)
{
    if (GetGPRState(false) == KERN_SUCCESS)
    {
        const uint32_t trace_bit = 0x100u;   // EFLAGS trap flag (TF)
        if (enable)
            m_state.context.gpr.__eflags |= trace_bit;
        else
            m_state.context.gpr.__eflags &= ~trace_bit;
        return SetGPRState();
    }
    return m_state.GetError(e_regSetGPR, Read);
}


//----------------------------------------------------------------------
// Register information definitions
//----------------------------------------------------------------------

// Byte offsets of each register within the Context structure, as transmitted
// to the debugger.
#define GPR_OFFSET(reg) (offsetof (DNBArchImplI386::GPR, __##reg))
#define FPU_OFFSET(reg) (offsetof (DNBArchImplI386::FPU, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.no_avx))
#define AVX_OFFSET(reg) (offsetof (DNBArchImplI386::AVX, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.avx))
#define EXC_OFFSET(reg) (offsetof (DNBArchImplI386::EXC, __##reg) + offsetof (DNBArchImplI386::Context, exc))

// Byte sizes of each register, derived from the state structure members.
#define GPR_SIZE(reg)       (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg))
#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg))
#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg))
#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg))
#define FPU_SIZE_YMM(reg)   (32)
#define EXC_SIZE(reg)       (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg))

// This does not accurately identify the location of ymm0...7 in
// Context.fpu.avx. That is because there is a bunch of padding
// in Context.fpu.avx that we don't need. Offset macros lay out
// the register state that Debugserver transmits to the debugger
// -- not to interpret the thread_get_state info.
// Each ymm register is transmitted as a contiguous 32-byte value appended
// after xmm7 in the wire layout (see the comment above).
#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.

// General purpose registers for 32 bit
// (columns: set, reg num, name, alt name, encoding, format, size, offset,
//  gcc num, dwarf num, generic num, gdb num; -1 = no such numbering)
const DNBRegisterInfo
DNBArchImplI386::g_gpr_registers[] =
{
{ e_regSetGPR, gpr_eax,    "eax"   , NULL      , Uint, Hex, GPR_SIZE(eax),    GPR_OFFSET(eax)   , gcc_eax   , dwarf_eax    , -1                    , gdb_eax   },
{ e_regSetGPR, gpr_ebx,    "ebx"   , NULL      , Uint, Hex, GPR_SIZE(ebx),    GPR_OFFSET(ebx)   , gcc_ebx   , dwarf_ebx    , -1                    , gdb_ebx   },
{ e_regSetGPR, gpr_ecx,    "ecx"   , NULL      , Uint, Hex, GPR_SIZE(ecx),    GPR_OFFSET(ecx)   , gcc_ecx   , dwarf_ecx    , -1                    , gdb_ecx   },
{ e_regSetGPR, gpr_edx,    "edx"   , NULL      , Uint, Hex, GPR_SIZE(edx),    GPR_OFFSET(edx)   , gcc_edx   , dwarf_edx    , -1                    , gdb_edx   },
{ e_regSetGPR, gpr_edi,    "edi"   , NULL      , Uint, Hex, GPR_SIZE(edi),    GPR_OFFSET(edi)   , gcc_edi   , dwarf_edi    , -1                    , gdb_edi   },
{ e_regSetGPR, gpr_esi,    "esi"   , NULL      , Uint, Hex, GPR_SIZE(esi),    GPR_OFFSET(esi)   , gcc_esi   , dwarf_esi    , -1                    , gdb_esi   },
{ e_regSetGPR, gpr_ebp,    "ebp"   , "fp"      , Uint, Hex, GPR_SIZE(ebp),    GPR_OFFSET(ebp)   , gcc_ebp   , dwarf_ebp    , GENERIC_REGNUM_FP     , gdb_ebp   },
{ e_regSetGPR, gpr_esp,    "esp"   , "sp"      , Uint, Hex, GPR_SIZE(esp),    GPR_OFFSET(esp)   , gcc_esp   , dwarf_esp    , GENERIC_REGNUM_SP     , gdb_esp   },
{ e_regSetGPR, gpr_ss,     "ss"    , NULL      , Uint, Hex, GPR_SIZE(ss),     GPR_OFFSET(ss)    , -1        , -1           , -1                    , gdb_ss    },
{ e_regSetGPR, gpr_eflags, "eflags", "flags"   , Uint, Hex, GPR_SIZE(eflags), GPR_OFFSET(eflags), gcc_eflags, dwarf_eflags , GENERIC_REGNUM_FLAGS  , gdb_eflags},
{ e_regSetGPR, gpr_eip,    "eip"   , "pc"      , Uint, Hex, GPR_SIZE(eip),    GPR_OFFSET(eip)   , gcc_eip   , dwarf_eip    , GENERIC_REGNUM_PC     , gdb_eip   },
{ e_regSetGPR, gpr_cs,     "cs"    , NULL      , Uint, Hex, GPR_SIZE(cs),     GPR_OFFSET(cs)    , -1        , -1           , -1                    , gdb_cs    },
{ e_regSetGPR, gpr_ds,     "ds"    , NULL      , Uint, Hex, GPR_SIZE(ds),     GPR_OFFSET(ds)    , -1        , -1           , -1                    , gdb_ds    },
{ e_regSetGPR, gpr_es,     "es"    , NULL      , Uint, Hex, GPR_SIZE(es),     GPR_OFFSET(es)    , -1        , -1           , -1                    , gdb_es    },
{ e_regSetGPR, gpr_fs,     "fs"    , NULL      , Uint, Hex, GPR_SIZE(fs),     GPR_OFFSET(fs)    , -1        , -1           , -1                    , gdb_fs    },
{ e_regSetGPR, gpr_gs,     "gs"    , NULL      , Uint, Hex, GPR_SIZE(gs),     GPR_OFFSET(gs)    , -1        , -1           , -1                    , gdb_gs    }
};


// Floating point registers when the thread state is read with the plain
// x87/SSE flavor (no AVX): control/status words, st(i) as stmm0-7, xmm0-7.
const DNBRegisterInfo
DNBArchImplI386::g_fpu_registers_no_avx[] =
{
{ e_regSetFPU, fpu_fcw      , "fctrl"     , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)      , FPU_OFFSET(fcw)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_fsw      , "fstat"     , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)      , FPU_OFFSET(fsw)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_ftw      , "ftag"      , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)      , FPU_OFFSET(ftw)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_fop      , "fop"       , NULL, Uint, Hex, FPU_SIZE_UINT(fop)      , FPU_OFFSET(fop)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_ip       , "fioff"     , NULL, Uint, Hex, FPU_SIZE_UINT(ip)       , FPU_OFFSET(ip)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_cs       , "fiseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(cs)       , FPU_OFFSET(cs)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_dp       , "fooff"     , NULL, Uint, Hex, FPU_SIZE_UINT(dp)       , FPU_OFFSET(dp)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_ds       , "foseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(ds)       , FPU_OFFSET(ds)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_mxcsr    , "mxcsr"     , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)    , FPU_OFFSET(mxcsr)    , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask), FPU_OFFSET(mxcsrmask), -1, -1, -1, -1 },

{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), -1, dwarf_stmm0, -1, gdb_stmm0 },
{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), -1, dwarf_stmm1, -1, gdb_stmm1 },
{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), -1, dwarf_stmm2, -1, gdb_stmm2 },
{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), -1, dwarf_stmm3, -1, gdb_stmm3 },
{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), -1, dwarf_stmm4, -1, gdb_stmm4 },
{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), -1, dwarf_stmm5, -1, gdb_stmm5 },
{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), -1, dwarf_stmm6, -1, gdb_stmm6 },
{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), -1, dwarf_stmm7, -1, gdb_stmm7 },

{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), -1, dwarf_xmm0, -1, gdb_xmm0 },
{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), -1, dwarf_xmm1, -1, gdb_xmm1 },
{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), -1, dwarf_xmm2, -1, gdb_xmm2 },
{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), -1, dwarf_xmm3, -1, gdb_xmm3 },
{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), -1, dwarf_xmm4, -1, gdb_xmm4 },
{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), -1, dwarf_xmm5, -1, gdb_xmm5 },
{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), -1, dwarf_xmm6, -1, gdb_xmm6 },
{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), -1, dwarf_xmm7, -1, gdb_xmm7 }
};

// Floating point registers when the thread state is read with the AVX
// flavor: same scalars/stmm/xmm (at AVX_OFFSETs), plus full 32-byte ymm0-7
// laid out after xmm7 via AVX_OFFSET_YMM.
const DNBRegisterInfo
DNBArchImplI386::g_fpu_registers_avx[] =
{
{ e_regSetFPU, fpu_fcw      , "fctrl"     , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)      , AVX_OFFSET(fcw)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_fsw      , "fstat"     , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)      , AVX_OFFSET(fsw)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_ftw      , "ftag"      , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)      , AVX_OFFSET(ftw)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_fop      , "fop"       , NULL, Uint, Hex, FPU_SIZE_UINT(fop)      , AVX_OFFSET(fop)      , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_ip       , "fioff"     , NULL, Uint, Hex, FPU_SIZE_UINT(ip)       , AVX_OFFSET(ip)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_cs       , "fiseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(cs)       , AVX_OFFSET(cs)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_dp       , "fooff"     , NULL, Uint, Hex, FPU_SIZE_UINT(dp)       , AVX_OFFSET(dp)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_ds       , "foseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(ds)       , AVX_OFFSET(ds)       , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_mxcsr    , "mxcsr"     , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)    , AVX_OFFSET(mxcsr)    , -1, -1, -1, -1 },
{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), -1, -1, -1, -1 },

{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), -1, dwarf_stmm0, -1, gdb_stmm0 },
{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), -1, dwarf_stmm1, -1, gdb_stmm1 },
{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), -1, dwarf_stmm2, -1, gdb_stmm2 },
{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), -1, dwarf_stmm3, -1, gdb_stmm3 },
{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), -1, dwarf_stmm4, -1, gdb_stmm4 },
{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), -1, dwarf_stmm5, -1, gdb_stmm5 },
{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), -1, dwarf_stmm6, -1, gdb_stmm6 },
{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), -1, dwarf_stmm7, -1, gdb_stmm7 },

{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), AVX_OFFSET(xmm0), -1, dwarf_xmm0, -1, gdb_xmm0 },
{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), AVX_OFFSET(xmm1), -1, dwarf_xmm1, -1, gdb_xmm1 },
{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), AVX_OFFSET(xmm2), -1, dwarf_xmm2, -1, gdb_xmm2 },
{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), AVX_OFFSET(xmm3), -1, dwarf_xmm3, -1, gdb_xmm3 },
{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), AVX_OFFSET(xmm4), -1, dwarf_xmm4, -1, gdb_xmm4 },
{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), AVX_OFFSET(xmm5), -1, dwarf_xmm5, -1, gdb_xmm5 },
{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), AVX_OFFSET(xmm6), -1, dwarf_xmm6, -1, gdb_xmm6 },
{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), AVX_OFFSET(xmm7), -1, dwarf_xmm7, -1, gdb_xmm7 },

{ e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), -1, dwarf_ymm0, -1, gdb_ymm0 },
{ e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), -1, dwarf_ymm1, -1, gdb_ymm1 },
{ e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), -1, dwarf_ymm2, -1, gdb_ymm2 },
{ e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), -1, dwarf_ymm3, -1, gdb_ymm3 },
{ e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), -1, dwarf_ymm4, -1, gdb_ymm4 },
{ e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), -1, dwarf_ymm5, -1, gdb_ymm5 },
{ e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), -1, dwarf_ymm6, -1, gdb_ymm6 },
{ e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), -1, dwarf_ymm7, -1, gdb_ymm7 },
};

// Exception state registers (trap number, error code, faulting address).
const DNBRegisterInfo
DNBArchImplI386::g_exc_registers[] =
{
{ e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)     , -1, -1, -1, -1 },
{ e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)        , -1, -1, -1, -1 },
{ e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr) , -1, -1, -1, -1 }
};

// Number of registers in each register set
const size_t DNBArchImplI386::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
const size_t DNBArchImplI386::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;

//----------------------------------------------------------------------
// Register set definitions. The first definition, at register set index
// zero, covers all registers; the individual sets follow. The register
// information for the all-registers set need not be filled in.
904//---------------------------------------------------------------------- 905const DNBRegisterSetInfo 906DNBArchImplI386::g_reg_sets_no_avx[] = 907{ 908 { "i386 Registers", NULL, k_num_all_registers_no_avx }, 909 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 910 { "Floating Point Registers", g_fpu_registers_no_avx, k_num_fpu_registers_no_avx }, 911 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 912}; 913 914const DNBRegisterSetInfo 915DNBArchImplI386::g_reg_sets_avx[] = 916{ 917 { "i386 Registers", NULL, k_num_all_registers_avx }, 918 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 919 { "Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx }, 920 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 921}; 922 923// Total number of register sets for this architecture 924const size_t DNBArchImplI386::k_num_register_sets = sizeof(g_reg_sets_no_avx)/sizeof(DNBRegisterSetInfo); 925 926DNBArchProtocol * 927DNBArchImplI386::Create (MachThread *thread) 928{ 929 return new DNBArchImplI386 (thread); 930} 931 932const uint8_t * const 933DNBArchImplI386::SoftwareBreakpointOpcode (nub_size_t byte_size) 934{ 935 static const uint8_t g_breakpoint_opcode[] = { 0xCC }; 936 if (byte_size == 1) 937 return g_breakpoint_opcode; 938 return NULL; 939} 940 941const DNBRegisterSetInfo * 942DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets) 943{ 944 *num_reg_sets = k_num_register_sets; 945 if (CPUHasAVX() || FORCE_AVX_REGS) 946 return g_reg_sets_avx; 947 else 948 return g_reg_sets_no_avx; 949} 950 951 952void 953DNBArchImplI386::Initialize() 954{ 955 DNBArchPluginInfo arch_plugin_info = 956 { 957 CPU_TYPE_I386, 958 DNBArchImplI386::Create, 959 DNBArchImplI386::GetRegisterSetInfo, 960 DNBArchImplI386::SoftwareBreakpointOpcode 961 }; 962 963 // Register this arch plug-in with the main protocol class 964 DNBArchProtocol::RegisterArchPlugin (arch_plugin_info); 965} 966 967bool 
968DNBArchImplI386::GetRegisterValue(int set, int reg, DNBRegisterValue *value) 969{ 970 if (set == REGISTER_SET_GENERIC) 971 { 972 switch (reg) 973 { 974 case GENERIC_REGNUM_PC: // Program Counter 975 set = e_regSetGPR; 976 reg = gpr_eip; 977 break; 978 979 case GENERIC_REGNUM_SP: // Stack Pointer 980 set = e_regSetGPR; 981 reg = gpr_esp; 982 break; 983 984 case GENERIC_REGNUM_FP: // Frame Pointer 985 set = e_regSetGPR; 986 reg = gpr_ebp; 987 break; 988 989 case GENERIC_REGNUM_FLAGS: // Processor flags register 990 set = e_regSetGPR; 991 reg = gpr_eflags; 992 break; 993 994 case GENERIC_REGNUM_RA: // Return Address 995 default: 996 return false; 997 } 998 } 999 1000 if (GetRegisterState(set, false) != KERN_SUCCESS) 1001 return false; 1002 1003 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1004 if (regInfo) 1005 { 1006 value->info = *regInfo; 1007 switch (set) 1008 { 1009 case e_regSetGPR: 1010 if (reg < k_num_gpr_registers) 1011 { 1012 value->value.uint32 = ((uint32_t*)(&m_state.context.gpr))[reg]; 1013 return true; 1014 } 1015 break; 1016 1017 case e_regSetFPU: 1018 if (CPUHasAVX() || FORCE_AVX_REGS) 1019 { 1020 switch (reg) 1021 { 1022 case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)); return true; 1023 case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)); return true; 1024 case fpu_ftw: value->value.uint8 = m_state.context.fpu.avx.__fpu_ftw; return true; 1025 case fpu_fop: value->value.uint16 = m_state.context.fpu.avx.__fpu_fop; return true; 1026 case fpu_ip: value->value.uint32 = m_state.context.fpu.avx.__fpu_ip; return true; 1027 case fpu_cs: value->value.uint16 = m_state.context.fpu.avx.__fpu_cs; return true; 1028 case fpu_dp: value->value.uint32 = m_state.context.fpu.avx.__fpu_dp; return true; 1029 case fpu_ds: value->value.uint16 = m_state.context.fpu.avx.__fpu_ds; return true; 1030 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr; 
return true; 1031 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask; return true; 1032 1033 case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, 10); return true; 1034 case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, 10); return true; 1035 case fpu_stmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, 10); return true; 1036 case fpu_stmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, 10); return true; 1037 case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, 10); return true; 1038 case fpu_stmm5: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 10); return true; 1039 case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, 10); return true; 1040 case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, 10); return true; 1041 1042 case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, 16); return true; 1043 case fpu_xmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, 16); return true; 1044 case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, 16); return true; 1045 case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, 16); return true; 1046 case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, 16); return true; 1047 case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, 16); return true; 1048 case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, 16); return true; 1049 case fpu_xmm7: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, 16); return true; 1050 1051#define MEMCPY_YMM(n) \ 1052 
memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, 16); \ 1053 memcpy((&value->value.uint8) + 16, m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16); 1054 case fpu_ymm0: MEMCPY_YMM(0); return true; 1055 case fpu_ymm1: MEMCPY_YMM(1); return true; 1056 case fpu_ymm2: MEMCPY_YMM(2); return true; 1057 case fpu_ymm3: MEMCPY_YMM(3); return true; 1058 case fpu_ymm4: MEMCPY_YMM(4); return true; 1059 case fpu_ymm5: MEMCPY_YMM(5); return true; 1060 case fpu_ymm6: MEMCPY_YMM(6); return true; 1061 case fpu_ymm7: MEMCPY_YMM(7); return true; 1062#undef MEMCPY_YMM 1063 } 1064 } 1065 else 1066 { 1067 switch (reg) 1068 { 1069 case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); return true; 1070 case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); return true; 1071 case fpu_ftw: value->value.uint8 = m_state.context.fpu.no_avx.__fpu_ftw; return true; 1072 case fpu_fop: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; return true; 1073 case fpu_ip: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; return true; 1074 case fpu_cs: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; return true; 1075 case fpu_dp: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; return true; 1076 case fpu_ds: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; return true; 1077 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; return true; 1078 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; return true; 1079 1080 case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10); return true; 1081 case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10); return true; 1082 case fpu_stmm2: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10); return true; 1083 case fpu_stmm3: 
memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10); return true; 1084 case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10); return true; 1085 case fpu_stmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10); return true; 1086 case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10); return true; 1087 case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10); return true; 1088 1089 case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16); return true; 1090 case fpu_xmm1: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16); return true; 1091 case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16); return true; 1092 case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16); return true; 1093 case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16); return true; 1094 case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16); return true; 1095 case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16); return true; 1096 case fpu_xmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16); return true; 1097 } 1098 } 1099 break; 1100 1101 case e_regSetEXC: 1102 if (reg < k_num_exc_registers) 1103 { 1104 value->value.uint32 = (&m_state.context.exc.__trapno)[reg]; 1105 return true; 1106 } 1107 break; 1108 } 1109 } 1110 return false; 1111} 1112 1113 1114bool 1115DNBArchImplI386::SetRegisterValue(int set, int reg, const DNBRegisterValue *value) 1116{ 1117 if (set == REGISTER_SET_GENERIC) 1118 { 1119 switch (reg) 1120 { 1121 case GENERIC_REGNUM_PC: // Program Counter 1122 set = e_regSetGPR; 1123 reg = 
gpr_eip; 1124 break; 1125 1126 case GENERIC_REGNUM_SP: // Stack Pointer 1127 set = e_regSetGPR; 1128 reg = gpr_esp; 1129 break; 1130 1131 case GENERIC_REGNUM_FP: // Frame Pointer 1132 set = e_regSetGPR; 1133 reg = gpr_ebp; 1134 break; 1135 1136 case GENERIC_REGNUM_FLAGS: // Processor flags register 1137 set = e_regSetGPR; 1138 reg = gpr_eflags; 1139 break; 1140 1141 case GENERIC_REGNUM_RA: // Return Address 1142 default: 1143 return false; 1144 } 1145 } 1146 1147 if (GetRegisterState(set, false) != KERN_SUCCESS) 1148 return false; 1149 1150 bool success = false; 1151 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1152 if (regInfo) 1153 { 1154 switch (set) 1155 { 1156 case e_regSetGPR: 1157 if (reg < k_num_gpr_registers) 1158 { 1159 ((uint32_t*)(&m_state.context.gpr))[reg] = value->value.uint32; 1160 success = true; 1161 } 1162 break; 1163 1164 case e_regSetFPU: 1165 if (CPUHasAVX() || FORCE_AVX_REGS) 1166 { 1167 switch (reg) 1168 { 1169 case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1170 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1171 case fpu_ftw: m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; success = true; break; 1172 case fpu_fop: m_state.context.fpu.avx.__fpu_fop = value->value.uint16; success = true; break; 1173 case fpu_ip: m_state.context.fpu.avx.__fpu_ip = value->value.uint32; success = true; break; 1174 case fpu_cs: m_state.context.fpu.avx.__fpu_cs = value->value.uint16; success = true; break; 1175 case fpu_dp: m_state.context.fpu.avx.__fpu_dp = value->value.uint32; success = true; break; 1176 case fpu_ds: m_state.context.fpu.avx.__fpu_ds = value->value.uint16; success = true; break; 1177 case fpu_mxcsr: m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1178 case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; success = true; 
break; 1179 1180 case fpu_stmm0: memcpy (m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break; 1181 case fpu_stmm1: memcpy (m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = true; break; 1182 case fpu_stmm2: memcpy (m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break; 1183 case fpu_stmm3: memcpy (m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break; 1184 case fpu_stmm4: memcpy (m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break; 1185 case fpu_stmm5: memcpy (m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10); success = true; break; 1186 case fpu_stmm6: memcpy (m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break; 1187 case fpu_stmm7: memcpy (m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break; 1188 1189 case fpu_xmm0: memcpy(m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break; 1190 case fpu_xmm1: memcpy(m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break; 1191 case fpu_xmm2: memcpy(m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break; 1192 case fpu_xmm3: memcpy(m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break; 1193 case fpu_xmm4: memcpy(m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break; 1194 case fpu_xmm5: memcpy(m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break; 1195 case fpu_xmm6: memcpy(m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break; 1196 case fpu_xmm7: memcpy(m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; 
break; 1197 1198#define MEMCPY_YMM(n) \ 1199 memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, 16); \ 1200 memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, (&value->value.uint8) + 16, 16); 1201 case fpu_ymm0: MEMCPY_YMM(0); return true; 1202 case fpu_ymm1: MEMCPY_YMM(1); return true; 1203 case fpu_ymm2: MEMCPY_YMM(2); return true; 1204 case fpu_ymm3: MEMCPY_YMM(3); return true; 1205 case fpu_ymm4: MEMCPY_YMM(4); return true; 1206 case fpu_ymm5: MEMCPY_YMM(5); return true; 1207 case fpu_ymm6: MEMCPY_YMM(6); return true; 1208 case fpu_ymm7: MEMCPY_YMM(7); return true; 1209#undef MEMCPY_YMM 1210 } 1211 } 1212 else 1213 { 1214 switch (reg) 1215 { 1216 case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1217 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1218 case fpu_ftw: m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8; success = true; break; 1219 case fpu_fop: m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; success = true; break; 1220 case fpu_ip: m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; success = true; break; 1221 case fpu_cs: m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; success = true; break; 1222 case fpu_dp: m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; success = true; break; 1223 case fpu_ds: m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; success = true; break; 1224 case fpu_mxcsr: m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1225 case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; success = true; break; 1226 1227 case fpu_stmm0: memcpy (m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break; 1228 case fpu_stmm1: memcpy (m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = 
true; break; 1229 case fpu_stmm2: memcpy (m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break; 1230 case fpu_stmm3: memcpy (m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break; 1231 case fpu_stmm4: memcpy (m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break; 1232 case fpu_stmm5: memcpy (m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10); success = true; break; 1233 case fpu_stmm6: memcpy (m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break; 1234 case fpu_stmm7: memcpy (m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break; 1235 1236 case fpu_xmm0: memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break; 1237 case fpu_xmm1: memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break; 1238 case fpu_xmm2: memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break; 1239 case fpu_xmm3: memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break; 1240 case fpu_xmm4: memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break; 1241 case fpu_xmm5: memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break; 1242 case fpu_xmm6: memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break; 1243 case fpu_xmm7: memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; break; 1244 } 1245 } 1246 break; 1247 1248 case e_regSetEXC: 1249 if (reg < k_num_exc_registers) 1250 { 1251 (&m_state.context.exc.__trapno)[reg] = value->value.uint32; 1252 success = true; 1253 } 1254 break; 1255 
} 1256 } 1257 1258 if (success) 1259 return SetRegisterState(set) == KERN_SUCCESS; 1260 return false; 1261} 1262 1263 1264nub_size_t 1265DNBArchImplI386::GetRegisterContext (void *buf, nub_size_t buf_len) 1266{ 1267 nub_size_t size = sizeof (m_state.context); 1268 1269 if (buf && buf_len) 1270 { 1271 if (size > buf_len) 1272 size = buf_len; 1273 1274 bool force = false; 1275 if (GetGPRState(force) | GetFPUState(force) | GetEXCState(force)) 1276 return 0; 1277 ::memcpy (buf, &m_state.context, size); 1278 } 1279 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1280 // Return the size of the register context even if NULL was passed in 1281 return size; 1282} 1283 1284nub_size_t 1285DNBArchImplI386::SetRegisterContext (const void *buf, nub_size_t buf_len) 1286{ 1287 nub_size_t size = sizeof (m_state.context); 1288 if (buf == NULL || buf_len == 0) 1289 size = 0; 1290 1291 if (size) 1292 { 1293 if (size > buf_len) 1294 size = buf_len; 1295 1296 ::memcpy (&m_state.context, buf, size); 1297 SetGPRState(); 1298 SetFPUState(); 1299 SetEXCState(); 1300 } 1301 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1302 return size; 1303} 1304 1305 1306 1307kern_return_t 1308DNBArchImplI386::GetRegisterState(int set, bool force) 1309{ 1310 switch (set) 1311 { 1312 case e_regSetALL: return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 1313 case e_regSetGPR: return GetGPRState(force); 1314 case e_regSetFPU: return GetFPUState(force); 1315 case e_regSetEXC: return GetEXCState(force); 1316 default: break; 1317 } 1318 return KERN_INVALID_ARGUMENT; 1319} 1320 1321kern_return_t 1322DNBArchImplI386::SetRegisterState(int set) 1323{ 1324 // Make sure we have a valid context to set. 
1325 if (RegisterSetStateIsValid(set)) 1326 { 1327 switch (set) 1328 { 1329 case e_regSetALL: return SetGPRState() | SetFPUState() | SetEXCState(); 1330 case e_regSetGPR: return SetGPRState(); 1331 case e_regSetFPU: return SetFPUState(); 1332 case e_regSetEXC: return SetEXCState(); 1333 default: break; 1334 } 1335 } 1336 return KERN_INVALID_ARGUMENT; 1337} 1338 1339bool 1340DNBArchImplI386::RegisterSetStateIsValid (int set) const 1341{ 1342 return m_state.RegsAreValid(set); 1343} 1344 1345#endif // #if defined (__i386__) 1346