DNBArchImplI386.cpp revision 44eb9fb021023027159df55f91c3e95384088970
1//===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// Created by Greg Clayton on 6/25/07. 11// 12//===----------------------------------------------------------------------===// 13 14#if defined (__i386__) || defined (__x86_64__) 15 16#include <sys/cdefs.h> 17 18#include "MacOSX/i386/DNBArchImplI386.h" 19#include "DNBLog.h" 20#include "MachThread.h" 21#include "MachProcess.h" 22 23extern "C" bool CPUHasAVX(); // Defined over in DNBArchImplX86_64.cpp 24 25#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG) 26enum debugState { 27 debugStateUnknown, 28 debugStateOff, 29 debugStateOn 30}; 31 32static debugState sFPUDebugState = debugStateUnknown; 33static debugState sAVXForceState = debugStateUnknown; 34 35static bool DebugFPURegs () 36{ 37 if (sFPUDebugState == debugStateUnknown) 38 { 39 if (getenv("DNB_DEBUG_FPU_REGS")) 40 sFPUDebugState = debugStateOn; 41 else 42 sFPUDebugState = debugStateOff; 43 } 44 45 return (sFPUDebugState == debugStateOn); 46} 47 48static bool ForceAVXRegs () 49{ 50 if (sFPUDebugState == debugStateUnknown) 51 { 52 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS")) 53 sAVXForceState = debugStateOn; 54 else 55 sAVXForceState = debugStateOff; 56 } 57 58 return (sAVXForceState == debugStateOn); 59} 60 61#define DEBUG_FPU_REGS (DebugFPURegs()) 62#define FORCE_AVX_REGS (ForceAVXRegs()) 63#else 64#define DEBUG_FPU_REGS (0) 65#define FORCE_AVX_REGS (0) 66#endif 67 68enum 69{ 70 gpr_eax = 0, 71 gpr_ebx = 1, 72 gpr_ecx = 2, 73 gpr_edx = 3, 74 gpr_edi = 4, 75 gpr_esi = 5, 76 gpr_ebp = 6, 77 gpr_esp = 7, 78 gpr_ss = 8, 79 gpr_eflags = 9, 80 gpr_eip = 10, 81 gpr_cs = 11, 82 gpr_ds = 12, 83 gpr_es = 13, 84 gpr_fs = 14, 85 gpr_gs = 15, 86 gpr_ax , 87 gpr_bx , 88 
gpr_cx , 89 gpr_dx , 90 gpr_di , 91 gpr_si , 92 gpr_bp , 93 gpr_sp , 94 gpr_ah , 95 gpr_bh , 96 gpr_ch , 97 gpr_dh , 98 gpr_al , 99 gpr_bl , 100 gpr_cl , 101 gpr_dl , 102 gpr_dil, 103 gpr_sil, 104 gpr_bpl, 105 gpr_spl, 106 k_num_gpr_regs 107}; 108 109enum { 110 fpu_fcw, 111 fpu_fsw, 112 fpu_ftw, 113 fpu_fop, 114 fpu_ip, 115 fpu_cs, 116 fpu_dp, 117 fpu_ds, 118 fpu_mxcsr, 119 fpu_mxcsrmask, 120 fpu_stmm0, 121 fpu_stmm1, 122 fpu_stmm2, 123 fpu_stmm3, 124 fpu_stmm4, 125 fpu_stmm5, 126 fpu_stmm6, 127 fpu_stmm7, 128 fpu_xmm0, 129 fpu_xmm1, 130 fpu_xmm2, 131 fpu_xmm3, 132 fpu_xmm4, 133 fpu_xmm5, 134 fpu_xmm6, 135 fpu_xmm7, 136 fpu_ymm0, 137 fpu_ymm1, 138 fpu_ymm2, 139 fpu_ymm3, 140 fpu_ymm4, 141 fpu_ymm5, 142 fpu_ymm6, 143 fpu_ymm7, 144 k_num_fpu_regs, 145 146 // Aliases 147 fpu_fctrl = fpu_fcw, 148 fpu_fstat = fpu_fsw, 149 fpu_ftag = fpu_ftw, 150 fpu_fiseg = fpu_cs, 151 fpu_fioff = fpu_ip, 152 fpu_foseg = fpu_ds, 153 fpu_fooff = fpu_dp 154}; 155 156enum { 157 exc_trapno, 158 exc_err, 159 exc_faultvaddr, 160 k_num_exc_regs, 161}; 162 163 164enum 165{ 166 gcc_eax = 0, 167 gcc_ecx, 168 gcc_edx, 169 gcc_ebx, 170 gcc_ebp, 171 gcc_esp, 172 gcc_esi, 173 gcc_edi, 174 gcc_eip, 175 gcc_eflags 176}; 177 178enum 179{ 180 dwarf_eax = 0, 181 dwarf_ecx, 182 dwarf_edx, 183 dwarf_ebx, 184 dwarf_esp, 185 dwarf_ebp, 186 dwarf_esi, 187 dwarf_edi, 188 dwarf_eip, 189 dwarf_eflags, 190 dwarf_stmm0 = 11, 191 dwarf_stmm1, 192 dwarf_stmm2, 193 dwarf_stmm3, 194 dwarf_stmm4, 195 dwarf_stmm5, 196 dwarf_stmm6, 197 dwarf_stmm7, 198 dwarf_xmm0 = 21, 199 dwarf_xmm1, 200 dwarf_xmm2, 201 dwarf_xmm3, 202 dwarf_xmm4, 203 dwarf_xmm5, 204 dwarf_xmm6, 205 dwarf_xmm7, 206 dwarf_ymm0 = dwarf_xmm0, 207 dwarf_ymm1 = dwarf_xmm1, 208 dwarf_ymm2 = dwarf_xmm2, 209 dwarf_ymm3 = dwarf_xmm3, 210 dwarf_ymm4 = dwarf_xmm4, 211 dwarf_ymm5 = dwarf_xmm5, 212 dwarf_ymm6 = dwarf_xmm6, 213 dwarf_ymm7 = dwarf_xmm7, 214}; 215 216enum 217{ 218 gdb_eax = 0, 219 gdb_ecx = 1, 220 gdb_edx = 2, 221 gdb_ebx = 3, 222 gdb_esp = 4, 223 
gdb_ebp = 5, 224 gdb_esi = 6, 225 gdb_edi = 7, 226 gdb_eip = 8, 227 gdb_eflags = 9, 228 gdb_cs = 10, 229 gdb_ss = 11, 230 gdb_ds = 12, 231 gdb_es = 13, 232 gdb_fs = 14, 233 gdb_gs = 15, 234 gdb_stmm0 = 16, 235 gdb_stmm1 = 17, 236 gdb_stmm2 = 18, 237 gdb_stmm3 = 19, 238 gdb_stmm4 = 20, 239 gdb_stmm5 = 21, 240 gdb_stmm6 = 22, 241 gdb_stmm7 = 23, 242 gdb_fctrl = 24, gdb_fcw = gdb_fctrl, 243 gdb_fstat = 25, gdb_fsw = gdb_fstat, 244 gdb_ftag = 26, gdb_ftw = gdb_ftag, 245 gdb_fiseg = 27, gdb_fpu_cs = gdb_fiseg, 246 gdb_fioff = 28, gdb_ip = gdb_fioff, 247 gdb_foseg = 29, gdb_fpu_ds = gdb_foseg, 248 gdb_fooff = 30, gdb_dp = gdb_fooff, 249 gdb_fop = 31, 250 gdb_xmm0 = 32, 251 gdb_xmm1 = 33, 252 gdb_xmm2 = 34, 253 gdb_xmm3 = 35, 254 gdb_xmm4 = 36, 255 gdb_xmm5 = 37, 256 gdb_xmm6 = 38, 257 gdb_xmm7 = 39, 258 gdb_mxcsr = 40, 259 gdb_mm0 = 41, 260 gdb_mm1 = 42, 261 gdb_mm2 = 43, 262 gdb_mm3 = 44, 263 gdb_mm4 = 45, 264 gdb_mm5 = 46, 265 gdb_mm6 = 47, 266 gdb_mm7 = 48, 267 gdb_ymm0 = gdb_xmm0, 268 gdb_ymm1 = gdb_xmm1, 269 gdb_ymm2 = gdb_xmm2, 270 gdb_ymm3 = gdb_xmm3, 271 gdb_ymm4 = gdb_xmm4, 272 gdb_ymm5 = gdb_xmm5, 273 gdb_ymm6 = gdb_xmm6, 274 gdb_ymm7 = gdb_xmm7 275}; 276 277uint64_t 278DNBArchImplI386::GetPC(uint64_t failValue) 279{ 280 // Get program counter 281 if (GetGPRState(false) == KERN_SUCCESS) 282 return m_state.context.gpr.__eip; 283 return failValue; 284} 285 286kern_return_t 287DNBArchImplI386::SetPC(uint64_t value) 288{ 289 // Get program counter 290 kern_return_t err = GetGPRState(false); 291 if (err == KERN_SUCCESS) 292 { 293 m_state.context.gpr.__eip = value; 294 err = SetGPRState(); 295 } 296 return err == KERN_SUCCESS; 297} 298 299uint64_t 300DNBArchImplI386::GetSP(uint64_t failValue) 301{ 302 // Get stack pointer 303 if (GetGPRState(false) == KERN_SUCCESS) 304 return m_state.context.gpr.__esp; 305 return failValue; 306} 307 308// Uncomment the value below to verify the values in the debugger. 
309//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 310//#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg 311 312kern_return_t 313DNBArchImplI386::GetGPRState(bool force) 314{ 315 if (force || m_state.GetError(e_regSetGPR, Read)) 316 { 317#if DEBUG_GPR_VALUES 318 SET_GPR(eax); 319 SET_GPR(ebx); 320 SET_GPR(ecx); 321 SET_GPR(edx); 322 SET_GPR(edi); 323 SET_GPR(esi); 324 SET_GPR(ebp); 325 SET_GPR(esp); 326 SET_GPR(ss); 327 SET_GPR(eflags); 328 SET_GPR(eip); 329 SET_GPR(cs); 330 SET_GPR(ds); 331 SET_GPR(es); 332 SET_GPR(fs); 333 SET_GPR(gs); 334 m_state.SetError(e_regSetGPR, Read, 0); 335#else 336 mach_msg_type_number_t count = e_regSetWordSizeGPR; 337 m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count)); 338#endif 339 } 340 return m_state.GetError(e_regSetGPR, Read); 341} 342 343// Uncomment the value below to verify the values in the debugger. 344//#define DEBUG_FPU_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 345 346kern_return_t 347DNBArchImplI386::GetFPUState(bool force) 348{ 349 if (force || m_state.GetError(e_regSetFPU, Read)) 350 { 351 if (DEBUG_FPU_REGS) 352 { 353 if (CPUHasAVX() || FORCE_AVX_REGS) 354 { 355 m_state.context.fpu.avx.__fpu_reserved[0] = -1; 356 m_state.context.fpu.avx.__fpu_reserved[1] = -1; 357 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234; 358 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678; 359 m_state.context.fpu.avx.__fpu_ftw = 1; 360 m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX; 361 m_state.context.fpu.avx.__fpu_fop = 2; 362 m_state.context.fpu.avx.__fpu_ip = 3; 363 m_state.context.fpu.avx.__fpu_cs = 4; 364 m_state.context.fpu.avx.__fpu_rsrv2 = 5; 365 m_state.context.fpu.avx.__fpu_dp = 6; 366 m_state.context.fpu.avx.__fpu_ds = 7; 367 m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX; 368 m_state.context.fpu.avx.__fpu_mxcsr = 8; 369 m_state.context.fpu.avx.__fpu_mxcsrmask = 
9; 370 int i; 371 for (i=0; i<16; ++i) 372 { 373 if (i<10) 374 { 375 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 376 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 377 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 378 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 379 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 380 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 381 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 382 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 383 } 384 else 385 { 386 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 387 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 388 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 389 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 390 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 391 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 392 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 393 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 394 } 395 396 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0'; 397 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1'; 398 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2'; 399 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3'; 400 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4'; 401 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5'; 402 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6'; 403 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7'; 404 } 405 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 406 m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN; 407 m_state.context.fpu.avx.__fpu_reserved1 = -1; 408 for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i) 409 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 410 411 for (i = 0; i < 16; ++i) 412 { 413 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0'; 414 
m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1'; 415 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2'; 416 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3'; 417 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4'; 418 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5'; 419 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6'; 420 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7'; 421 } 422 } 423 else 424 { 425 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 426 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 427 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 428 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 429 m_state.context.fpu.no_avx.__fpu_ftw = 1; 430 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 431 m_state.context.fpu.no_avx.__fpu_fop = 2; 432 m_state.context.fpu.no_avx.__fpu_ip = 3; 433 m_state.context.fpu.no_avx.__fpu_cs = 4; 434 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 435 m_state.context.fpu.no_avx.__fpu_dp = 6; 436 m_state.context.fpu.no_avx.__fpu_ds = 7; 437 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 438 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 439 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 440 int i; 441 for (i=0; i<16; ++i) 442 { 443 if (i<10) 444 { 445 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 446 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 447 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 448 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 449 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 450 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 451 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 452 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 453 } 454 else 455 { 456 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 457 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 458 
m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 459 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 460 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 461 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 462 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 463 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 464 } 465 466 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 467 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 468 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 469 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 470 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 471 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 472 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 473 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 474 } 475 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 476 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 477 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 478 } 479 m_state.SetError(e_regSetFPU, Read, 0); 480 } 481 else 482 { 483 if (CPUHasAVX() || FORCE_AVX_REGS) 484 { 485 mach_msg_type_number_t count = e_regSetWordSizeAVX; 486 m_state.SetError (e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count)); 487 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x", 488 m_thread->MachPortNumber(), __i386_AVX_STATE, count, e_regSetWordSizeAVX, 489 m_state.GetError(e_regSetFPU, Read)); 490 } 491 else 492 { 493 mach_msg_type_number_t count = e_regSetWordSizeFPU; 494 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count)); 495 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in) => 0x%8.8x", 
496 m_thread->MachPortNumber(), __i386_FLOAT_STATE, count, e_regSetWordSizeFPU, 497 m_state.GetError(e_regSetFPU, Read)); 498 } 499 } 500 } 501 return m_state.GetError(e_regSetFPU, Read); 502} 503 504kern_return_t 505DNBArchImplI386::GetEXCState(bool force) 506{ 507 if (force || m_state.GetError(e_regSetEXC, Read)) 508 { 509 mach_msg_type_number_t count = e_regSetWordSizeEXC; 510 m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count)); 511 } 512 return m_state.GetError(e_regSetEXC, Read); 513} 514 515kern_return_t 516DNBArchImplI386::SetGPRState() 517{ 518 m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR)); 519 return m_state.GetError(e_regSetGPR, Write); 520} 521 522kern_return_t 523DNBArchImplI386::SetFPUState() 524{ 525 if (DEBUG_FPU_REGS) 526 { 527 m_state.SetError(e_regSetFPU, Write, 0); 528 return m_state.GetError(e_regSetFPU, Write); 529 } 530 else 531 { 532 if (CPUHasAVX() || FORCE_AVX_REGS) 533 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX)); 534 else 535 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU)); 536 return m_state.GetError(e_regSetFPU, Write); 537 } 538} 539 540kern_return_t 541DNBArchImplI386::SetEXCState() 542{ 543 m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC)); 544 return m_state.GetError(e_regSetEXC, Write); 545} 546 547kern_return_t 548DNBArchImplI386::GetDBGState(bool force) 549{ 550 if (force || m_state.GetError(e_regSetDBG, Read)) 551 { 552 mach_msg_type_number_t count = 
e_regSetWordSizeDBG; 553 m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count)); 554 } 555 return m_state.GetError(e_regSetDBG, Read); 556} 557 558kern_return_t 559DNBArchImplI386::SetDBGState() 560{ 561 m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG)); 562 return m_state.GetError(e_regSetDBG, Write); 563} 564 565void 566DNBArchImplI386::ThreadWillResume() 567{ 568 // Do we need to step this thread? If so, let the mach thread tell us so. 569 if (m_thread->IsStepping()) 570 { 571 // This is the primary thread, let the arch do anything it needs 572 EnableHardwareSingleStep(true); 573 } 574 575 // Reset the debug status register, if necessary, before we resume. 576 kern_return_t kret = GetDBGState(false); 577 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret); 578 if (kret != KERN_SUCCESS) 579 return; 580 581 DBG &debug_state = m_state.context.dbg; 582 bool need_reset = false; 583 uint32_t i, num = NumSupportedHardwareWatchpoints(); 584 for (i = 0; i < num; ++i) 585 if (IsWatchpointHit(debug_state, i)) 586 need_reset = true; 587 588 if (need_reset) 589 { 590 ClearWatchpointHits(debug_state); 591 kret = SetDBGState(); 592 DNBLogThreadedIf(LOG_WATCHPOINTS,"DNBArchImplI386::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret); 593 } 594} 595 596bool 597DNBArchImplI386::ThreadDidStop() 598{ 599 bool success = true; 600 601 m_state.InvalidateAllRegisterStates(); 602 603 // Are we stepping a single instruction? 604 if (GetGPRState(true) == KERN_SUCCESS) 605 { 606 // We are single stepping, was this the primary thread? 607 if (m_thread->IsStepping()) 608 { 609 // This was the primary thread, we need to clear the trace 610 // bit if so. 
611 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 612 } 613 else 614 { 615 // The MachThread will automatically restore the suspend count 616 // in ThreadDidStop(), so we don't need to do anything here if 617 // we weren't the primary thread the last time 618 } 619 } 620 return success; 621} 622 623bool 624DNBArchImplI386::NotifyException(MachException::Data& exc) 625{ 626 switch (exc.exc_type) 627 { 628 case EXC_BAD_ACCESS: 629 break; 630 case EXC_BAD_INSTRUCTION: 631 break; 632 case EXC_ARITHMETIC: 633 break; 634 case EXC_EMULATION: 635 break; 636 case EXC_SOFTWARE: 637 break; 638 case EXC_BREAKPOINT: 639 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) 640 { 641 // exc_code = EXC_I386_BPT 642 // 643 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS); 644 if (pc != INVALID_NUB_ADDRESS && pc > 0) 645 { 646 pc -= 1; 647 // Check for a breakpoint at one byte prior to the current PC value 648 // since the PC will be just past the trap. 649 650 nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc); 651 if (NUB_BREAK_ID_IS_VALID(breakID)) 652 { 653 // Backup the PC for i386 since the trap was taken and the PC 654 // is at the address following the single byte trap instruction. 655 if (m_state.context.gpr.__eip > 0) 656 { 657 m_state.context.gpr.__eip = pc; 658 // Write the new PC back out 659 SetGPRState (); 660 } 661 } 662 return true; 663 } 664 } 665 else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1) 666 { 667 // exc_code = EXC_I386_SGL 668 // 669 // Check whether this corresponds to a watchpoint hit event. 670 // If yes, set the exc_sub_code to the data break address. 671 nub_addr_t addr = 0; 672 uint32_t hw_index = GetHardwareWatchpointHit(addr); 673 if (hw_index != INVALID_NUB_HW_INDEX) 674 { 675 exc.exc_data[1] = addr; 676 // Piggyback the hw_index in the exc.data. 
677 exc.exc_data.push_back(hw_index); 678 } 679 680 return true; 681 } 682 break; 683 case EXC_SYSCALL: 684 break; 685 case EXC_MACH_SYSCALL: 686 break; 687 case EXC_RPC_ALERT: 688 break; 689 } 690 return false; 691} 692 693uint32_t 694DNBArchImplI386::NumSupportedHardwareWatchpoints() 695{ 696 // Available debug address registers: dr0, dr1, dr2, dr3. 697 return 4; 698} 699 700static uint32_t 701size_and_rw_bits(nub_size_t size, bool read, bool write) 702{ 703 uint32_t rw; 704 if (read) { 705 rw = 0x3; // READ or READ/WRITE 706 } else if (write) { 707 rw = 0x1; // WRITE 708 } else { 709 assert(0 && "read and write cannot both be false"); 710 } 711 712 switch (size) { 713 case 1: 714 return rw; 715 case 2: 716 return (0x1 << 2) | rw; 717 case 4: 718 return (0x3 << 2) | rw; 719 case 8: 720 return (0x2 << 2) | rw; 721 default: 722 assert(0 && "invalid size, must be one of 1, 2, 4, or 8"); 723 } 724} 725void 726DNBArchImplI386::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write) 727{ 728 // Set both dr7 (debug control register) and dri (debug address register). 729 730 // dr7{7-0} encodes the local/gloabl enable bits: 731 // global enable --. 
.-- local enable 732 // | | 733 // v v 734 // dr0 -> bits{1-0} 735 // dr1 -> bits{3-2} 736 // dr2 -> bits{5-4} 737 // dr3 -> bits{7-6} 738 // 739 // dr7{31-16} encodes the rw/len bits: 740 // b_x+3, b_x+2, b_x+1, b_x 741 // where bits{x+1, x} => rw 742 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused) 743 // and bits{x+3, x+2} => len 744 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte 745 // 746 // dr0 -> bits{19-16} 747 // dr1 -> bits{23-20} 748 // dr2 -> bits{27-24} 749 // dr3 -> bits{31-28} 750 debug_state.__dr7 |= (1 << (2*hw_index) | 751 size_and_rw_bits(size, read, write) << (16+4*hw_index)); 752 uint32_t addr_32 = addr & 0xffffffff; 753 switch (hw_index) { 754 case 0: 755 debug_state.__dr0 = addr_32; break; 756 case 1: 757 debug_state.__dr1 = addr_32; break; 758 case 2: 759 debug_state.__dr2 = addr_32; break; 760 case 3: 761 debug_state.__dr3 = addr_32; break; 762 default: 763 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 764 } 765 return; 766} 767 768void 769DNBArchImplI386::ClearWatchpoint(DBG &debug_state, uint32_t hw_index) 770{ 771 debug_state.__dr7 &= ~(3 << (2*hw_index)); 772 switch (hw_index) { 773 case 0: 774 debug_state.__dr0 = 0; break; 775 case 1: 776 debug_state.__dr1 = 0; break; 777 case 2: 778 debug_state.__dr2 = 0; break; 779 case 3: 780 debug_state.__dr3 = 0; break; 781 default: 782 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 783 } 784 return; 785} 786 787bool 788DNBArchImplI386::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index) 789{ 790 // Check dr7 (debug control register) for local/global enable bits: 791 // global enable --. .-- local enable 792 // | | 793 // v v 794 // dr0 -> bits{1-0} 795 // dr1 -> bits{3-2} 796 // dr2 -> bits{5-4} 797 // dr3 -> bits{7-6} 798 return (debug_state.__dr7 & (3 << (2*hw_index))) == 0; 799} 800 801// Resets local copy of debug status register to wait for the next debug excpetion. 
802void 803DNBArchImplI386::ClearWatchpointHits(DBG &debug_state) 804{ 805 // See also IsWatchpointHit(). 806 debug_state.__dr6 = 0; 807 return; 808} 809 810bool 811DNBArchImplI386::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index) 812{ 813 // Check dr6 (debug status register) whether a watchpoint hits: 814 // is watchpoint hit? 815 // | 816 // v 817 // dr0 -> bits{0} 818 // dr1 -> bits{1} 819 // dr2 -> bits{2} 820 // dr3 -> bits{3} 821 return (debug_state.__dr6 & (1 << hw_index)); 822} 823 824nub_addr_t 825DNBArchImplI386::GetWatchAddress(const DBG &debug_state, uint32_t hw_index) 826{ 827 switch (hw_index) { 828 case 0: 829 return debug_state.__dr0; 830 case 1: 831 return debug_state.__dr1; 832 case 2: 833 return debug_state.__dr2; 834 case 3: 835 return debug_state.__dr3; 836 default: 837 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 838 } 839} 840 841bool 842DNBArchImplI386::StartTransForHWP() 843{ 844 if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back) 845 DNBLogError ("%s inconsistent state detected, expected %d or %d, got: %d", __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state); 846 m_2pc_dbg_checkpoint = m_state.context.dbg; 847 m_2pc_trans_state = Trans_Pending; 848 return true; 849} 850bool 851DNBArchImplI386::RollbackTransForHWP() 852{ 853 m_state.context.dbg = m_2pc_dbg_checkpoint; 854 if (m_2pc_trans_state != Trans_Pending) 855 DNBLogError ("%s inconsistent state detected, expected %d, got: %d", __FUNCTION__, Trans_Pending, m_2pc_trans_state); 856 m_2pc_trans_state = Trans_Rolled_Back; 857 kern_return_t kret = SetDBGState(); 858 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret); 859 860 if (kret == KERN_SUCCESS) 861 return true; 862 else 863 return false; 864} 865bool 866DNBArchImplI386::FinishTransForHWP() 867{ 868 m_2pc_trans_state = Trans_Done; 869 return true; 870} 871DNBArchImplI386::DBG 
872DNBArchImplI386::GetDBGCheckpoint() 873{ 874 return m_2pc_dbg_checkpoint; 875} 876 877uint32_t 878DNBArchImplI386::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write) 879{ 880 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(addr = 0x%llx, size = %llu, read = %u, write = %u)", (uint64_t)addr, (uint64_t)size, read, write); 881 882 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 883 884 // Can only watch 1, 2, 4, or 8 bytes. 885 if (!(size == 1 || size == 2 || size == 4 || size == 8)) 886 return INVALID_NUB_HW_INDEX; 887 888 // We must watch for either read or write 889 if (read == false && write == false) 890 return INVALID_NUB_HW_INDEX; 891 892 // Read the debug state 893 kern_return_t kret = GetDBGState(false); 894 895 if (kret == KERN_SUCCESS) 896 { 897 // Check to make sure we have the needed hardware support 898 uint32_t i = 0; 899 900 DBG &debug_state = m_state.context.dbg; 901 for (i = 0; i < num_hw_watchpoints; ++i) 902 { 903 if (IsWatchpointVacant(debug_state, i)) 904 break; 905 } 906 907 // See if we found an available hw breakpoint slot above 908 if (i < num_hw_watchpoints) 909 { 910 StartTransForHWP(); 911 912 // Modify our local copy of the debug state, first. 913 SetWatchpoint(debug_state, i, addr, size, read, write); 914 // Now set the watch point in the inferior. 915 kret = SetDBGState(); 916 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret); 917 918 if (kret == KERN_SUCCESS) 919 return i; 920 else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed. 
921 m_state.context.dbg = GetDBGCheckpoint(); 922 } 923 else 924 { 925 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints); 926 } 927 } 928 return INVALID_NUB_HW_INDEX; 929} 930 931bool 932DNBArchImplI386::DisableHardwareWatchpoint (uint32_t hw_index) 933{ 934 kern_return_t kret = GetDBGState(false); 935 936 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 937 if (kret == KERN_SUCCESS) 938 { 939 DBG &debug_state = m_state.context.dbg; 940 if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index)) 941 { 942 StartTransForHWP(); 943 944 // Modify our local copy of the debug state, first. 945 ClearWatchpoint(debug_state, hw_index); 946 // Now disable the watch point in the inferior. 947 kret = SetDBGState(); 948 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::DisableHardwareWatchpoint( %u )", 949 hw_index); 950 951 if (kret == KERN_SUCCESS) 952 return true; 953 else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed. 954 m_state.context.dbg = GetDBGCheckpoint(); 955 } 956 } 957 return false; 958} 959 960DNBArchImplI386::DBG DNBArchImplI386::Global_Debug_State = {0,0,0,0,0,0,0,0}; 961bool DNBArchImplI386::Valid_Global_Debug_State = false; 962 963// Use this callback from MachThread, which in turn was called from MachThreadList, to update 964// the global view of the hardware watchpoint state, so that when new thread comes along, they 965// get to inherit the existing hardware watchpoint state. 966void 967DNBArchImplI386::HardwareWatchpointStateChanged () 968{ 969 Global_Debug_State = m_state.context.dbg; 970 Valid_Global_Debug_State = true; 971} 972 973// Iterate through the debug status register; return the index of the first hit. 
974uint32_t 975DNBArchImplI386::GetHardwareWatchpointHit(nub_addr_t &addr) 976{ 977 // Read the debug state 978 kern_return_t kret = GetDBGState(true); 979 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret); 980 if (kret == KERN_SUCCESS) 981 { 982 DBG &debug_state = m_state.context.dbg; 983 uint32_t i, num = NumSupportedHardwareWatchpoints(); 984 for (i = 0; i < num; ++i) 985 { 986 if (IsWatchpointHit(debug_state, i)) 987 { 988 addr = GetWatchAddress(debug_state, i); 989 DNBLogThreadedIf(LOG_WATCHPOINTS, 990 "DNBArchImplI386::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).", 991 i, (uint64_t)addr); 992 return i; 993 } 994 } 995 } 996 return INVALID_NUB_HW_INDEX; 997} 998 999// Set the single step bit in the processor status register. 1000kern_return_t 1001DNBArchImplI386::EnableHardwareSingleStep (bool enable) 1002{ 1003 if (GetGPRState(false) == KERN_SUCCESS) 1004 { 1005 const uint32_t trace_bit = 0x100u; 1006 if (enable) 1007 m_state.context.gpr.__eflags |= trace_bit; 1008 else 1009 m_state.context.gpr.__eflags &= ~trace_bit; 1010 return SetGPRState(); 1011 } 1012 return m_state.GetError(e_regSetGPR, Read); 1013} 1014 1015 1016//---------------------------------------------------------------------- 1017// Register information defintions 1018//---------------------------------------------------------------------- 1019 1020#define DEFINE_GPR_PSEUDO_16(reg16,reg32) { e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, GPR_OFFSET(reg32) ,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32 } 1021#define DEFINE_GPR_PSEUDO_8H(reg8,reg32) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 1, GPR_OFFSET(reg32)+1,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32 } 1022#define DEFINE_GPR_PSEUDO_8L(reg8,reg32) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 
1, GPR_OFFSET(reg32) ,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32 } 1023 1024 1025#define GPR_OFFSET(reg) (offsetof (DNBArchImplI386::GPR, __##reg)) 1026#define FPU_OFFSET(reg) (offsetof (DNBArchImplI386::FPU, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.no_avx)) 1027#define AVX_OFFSET(reg) (offsetof (DNBArchImplI386::AVX, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.avx)) 1028#define EXC_OFFSET(reg) (offsetof (DNBArchImplI386::EXC, __##reg) + offsetof (DNBArchImplI386::Context, exc)) 1029 1030#define GPR_SIZE(reg) (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg)) 1031#define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg)) 1032#define FPU_SIZE_MMST(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg)) 1033#define FPU_SIZE_XMM(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg)) 1034#define FPU_SIZE_YMM(reg) (32) 1035#define EXC_SIZE(reg) (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg)) 1036 1037// This does not accurately identify the location of ymm0...7 in 1038// Context.fpu.avx. That is because there is a bunch of padding 1039// in Context.fpu.avx that we don't need. Offset macros lay out 1040// the register state that Debugserver transmits to the debugger 1041// -- not to interpret the thread_get_state info. 1042#define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n)) 1043 1044// These macros will auto define the register name, alt name, register size, 1045// register offset, encoding, format and native register. This ensures that 1046// the register state structures are defined correctly and have the correct 1047// sizes and offsets. 
// Containment lists: each pseudo sub-register names the full 32-bit GPR whose
// storage it overlays. INVALID_NUB_REGNUM terminates each list.
uint32_t g_contained_eax[] = { gpr_eax, INVALID_NUB_REGNUM };
uint32_t g_contained_ebx[] = { gpr_ebx, INVALID_NUB_REGNUM };
uint32_t g_contained_ecx[] = { gpr_ecx, INVALID_NUB_REGNUM };
uint32_t g_contained_edx[] = { gpr_edx, INVALID_NUB_REGNUM };
uint32_t g_contained_edi[] = { gpr_edi, INVALID_NUB_REGNUM };
uint32_t g_contained_esi[] = { gpr_esi, INVALID_NUB_REGNUM };
uint32_t g_contained_ebp[] = { gpr_ebp, INVALID_NUB_REGNUM };
uint32_t g_contained_esp[] = { gpr_esp, INVALID_NUB_REGNUM };

// Invalidation lists: writing any register in a family invalidates the cached
// values of every alias of that family (full register plus its sub-registers).
uint32_t g_invalidate_eax[] = { gpr_eax , gpr_ax , gpr_ah , gpr_al, INVALID_NUB_REGNUM };
uint32_t g_invalidate_ebx[] = { gpr_ebx , gpr_bx , gpr_bh , gpr_bl, INVALID_NUB_REGNUM };
uint32_t g_invalidate_ecx[] = { gpr_ecx , gpr_cx , gpr_ch , gpr_cl, INVALID_NUB_REGNUM };
uint32_t g_invalidate_edx[] = { gpr_edx , gpr_dx , gpr_dh , gpr_dl, INVALID_NUB_REGNUM };
uint32_t g_invalidate_edi[] = { gpr_edi , gpr_di , gpr_dil , INVALID_NUB_REGNUM };
uint32_t g_invalidate_esi[] = { gpr_esi , gpr_si , gpr_sil , INVALID_NUB_REGNUM };
uint32_t g_invalidate_ebp[] = { gpr_ebp , gpr_bp , gpr_bpl , INVALID_NUB_REGNUM };
uint32_t g_invalidate_esp[] = { gpr_esp , gpr_sp , gpr_spl , INVALID_NUB_REGNUM };

// General purpose registers for 32 bit i386. NOTE: the enum value of each
// gpr_* entry is its index in this table, and for the first 16 entries also
// its word index into the GPR state structure.
const DNBRegisterInfo
DNBArchImplI386::g_gpr_registers[] =
{
{ e_regSetGPR, gpr_eax,    "eax"   , NULL     , Uint, Hex, GPR_SIZE(eax),    GPR_OFFSET(eax)    , gcc_eax           , dwarf_eax         , INVALID_NUB_REGNUM   , gdb_eax   , NULL, g_invalidate_eax },
{ e_regSetGPR, gpr_ebx,    "ebx"   , NULL     , Uint, Hex, GPR_SIZE(ebx),    GPR_OFFSET(ebx)    , gcc_ebx           , dwarf_ebx         , INVALID_NUB_REGNUM   , gdb_ebx   , NULL, g_invalidate_ebx },
{ e_regSetGPR, gpr_ecx,    "ecx"   , NULL     , Uint, Hex, GPR_SIZE(ecx),    GPR_OFFSET(ecx)    , gcc_ecx           , dwarf_ecx         , INVALID_NUB_REGNUM   , gdb_ecx   , NULL, g_invalidate_ecx },
{ e_regSetGPR, gpr_edx,    "edx"   , NULL     , Uint, Hex, GPR_SIZE(edx),    GPR_OFFSET(edx)    , gcc_edx           , dwarf_edx         , INVALID_NUB_REGNUM   , gdb_edx   , NULL, g_invalidate_edx },
{ e_regSetGPR, gpr_edi,    "edi"   , NULL     , Uint, Hex, GPR_SIZE(edi),    GPR_OFFSET(edi)    , gcc_edi           , dwarf_edi         , INVALID_NUB_REGNUM   , gdb_edi   , NULL, g_invalidate_edi },
{ e_regSetGPR, gpr_esi,    "esi"   , NULL     , Uint, Hex, GPR_SIZE(esi),    GPR_OFFSET(esi)    , gcc_esi           , dwarf_esi         , INVALID_NUB_REGNUM   , gdb_esi   , NULL, g_invalidate_esi },
{ e_regSetGPR, gpr_ebp,    "ebp"   , "fp"     , Uint, Hex, GPR_SIZE(ebp),    GPR_OFFSET(ebp)    , gcc_ebp           , dwarf_ebp         , GENERIC_REGNUM_FP    , gdb_ebp   , NULL, g_invalidate_ebp },
{ e_regSetGPR, gpr_esp,    "esp"   , "sp"     , Uint, Hex, GPR_SIZE(esp),    GPR_OFFSET(esp)    , gcc_esp           , dwarf_esp         , GENERIC_REGNUM_SP    , gdb_esp   , NULL, g_invalidate_esp },
{ e_regSetGPR, gpr_ss,     "ss"    , NULL     , Uint, Hex, GPR_SIZE(ss),     GPR_OFFSET(ss)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM   , gdb_ss    , NULL, NULL},
{ e_regSetGPR, gpr_eflags, "eflags", "flags"  , Uint, Hex, GPR_SIZE(eflags), GPR_OFFSET(eflags) , gcc_eflags        , dwarf_eflags      , GENERIC_REGNUM_FLAGS , gdb_eflags, NULL, NULL},
{ e_regSetGPR, gpr_eip,    "eip"   , "pc"     , Uint, Hex, GPR_SIZE(eip),    GPR_OFFSET(eip)    , gcc_eip           , dwarf_eip         , GENERIC_REGNUM_PC    , gdb_eip   , NULL, NULL},
{ e_regSetGPR, gpr_cs,     "cs"    , NULL     , Uint, Hex, GPR_SIZE(cs),     GPR_OFFSET(cs)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM   , gdb_cs    , NULL, NULL},
{ e_regSetGPR, gpr_ds,     "ds"    , NULL     , Uint, Hex, GPR_SIZE(ds),     GPR_OFFSET(ds)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM   , gdb_ds    , NULL, NULL},
{ e_regSetGPR, gpr_es,     "es"    , NULL     , Uint, Hex, GPR_SIZE(es),     GPR_OFFSET(es)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM   , gdb_es    , NULL, NULL},
{ e_regSetGPR, gpr_fs,     "fs"    , NULL     , Uint, Hex, GPR_SIZE(fs),     GPR_OFFSET(fs)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM   , gdb_fs    , NULL, NULL},
{ e_regSetGPR, gpr_gs,     "gs"    , NULL     , Uint, Hex, GPR_SIZE(gs),     GPR_OFFSET(gs)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM   , gdb_gs    , NULL, NULL},
// Pseudo sub-registers (16-bit, high-8, low-8 aliases of the GPRs above).
DEFINE_GPR_PSEUDO_16 (ax , eax),
DEFINE_GPR_PSEUDO_16 (bx , ebx),
DEFINE_GPR_PSEUDO_16 (cx , ecx),
DEFINE_GPR_PSEUDO_16 (dx , edx),
DEFINE_GPR_PSEUDO_16 (di , edi),
DEFINE_GPR_PSEUDO_16 (si , esi),
DEFINE_GPR_PSEUDO_16 (bp , ebp),
DEFINE_GPR_PSEUDO_16 (sp , esp),
DEFINE_GPR_PSEUDO_8H (ah , eax),
DEFINE_GPR_PSEUDO_8H (bh , ebx),
DEFINE_GPR_PSEUDO_8H (ch , ecx),
DEFINE_GPR_PSEUDO_8H (dh , edx),
DEFINE_GPR_PSEUDO_8L (al , eax),
DEFINE_GPR_PSEUDO_8L (bl , ebx),
DEFINE_GPR_PSEUDO_8L (cl , ecx),
DEFINE_GPR_PSEUDO_8L (dl , edx),
DEFINE_GPR_PSEUDO_8L (dil, edi),
DEFINE_GPR_PSEUDO_8L (sil, esi),
DEFINE_GPR_PSEUDO_8L (bpl, ebp),
DEFINE_GPR_PSEUDO_8L (spl, esp)
};


// Floating point registers as laid out in the plain (non-AVX) FPU state.
const DNBRegisterInfo
DNBArchImplI386::g_fpu_registers_no_avx[] =
{
{ e_regSetFPU, fpu_fcw      , "fctrl"     , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_fsw      , "fstat"     , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_ftw      , "ftag"      , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_fop      , "fop"       , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_ip       , "fioff"     , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_cs       , "fiseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_dp       , "fooff"     , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_ds       , "foseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_mxcsr    , "mxcsr"     , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },

{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0, INVALID_NUB_REGNUM, gdb_stmm0, NULL, NULL },
{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1, INVALID_NUB_REGNUM, gdb_stmm1, NULL, NULL },
{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, INVALID_NUB_REGNUM, gdb_stmm2, NULL, NULL },
{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, INVALID_NUB_REGNUM, gdb_stmm3, NULL, NULL },
{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, INVALID_NUB_REGNUM, gdb_stmm4, NULL, NULL },
{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, INVALID_NUB_REGNUM, gdb_stmm5, NULL, NULL },
{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, INVALID_NUB_REGNUM, gdb_stmm6, NULL, NULL },
{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, INVALID_NUB_REGNUM, gdb_stmm7, NULL, NULL },

{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), INVALID_NUB_REGNUM, dwarf_xmm0, INVALID_NUB_REGNUM, gdb_xmm0, NULL, NULL },
{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), INVALID_NUB_REGNUM, dwarf_xmm1, INVALID_NUB_REGNUM, gdb_xmm1, NULL, NULL },
{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), INVALID_NUB_REGNUM, dwarf_xmm2, INVALID_NUB_REGNUM, gdb_xmm2, NULL, NULL },
{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), INVALID_NUB_REGNUM, dwarf_xmm3, INVALID_NUB_REGNUM, gdb_xmm3, NULL, NULL },
{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), INVALID_NUB_REGNUM, dwarf_xmm4, INVALID_NUB_REGNUM, gdb_xmm4, NULL, NULL },
{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), INVALID_NUB_REGNUM, dwarf_xmm5, INVALID_NUB_REGNUM, gdb_xmm5, NULL, NULL },
{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), INVALID_NUB_REGNUM, dwarf_xmm6, INVALID_NUB_REGNUM, gdb_xmm6, NULL, NULL },
{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), INVALID_NUB_REGNUM, dwarf_xmm7, INVALID_NUB_REGNUM, gdb_xmm7, NULL, NULL }
};

// Floating point registers as laid out in the AVX FPU state; same entries as
// the non-AVX table (using AVX_OFFSET) plus the ymm0-7 registers, whose
// transmitted offsets are synthesized by AVX_OFFSET_YMM (see note above).
const DNBRegisterInfo
DNBArchImplI386::g_fpu_registers_avx[] =
{
{ e_regSetFPU, fpu_fcw      , "fctrl"     , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_fsw      , "fstat"     , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_ftw      , "ftag"      , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_fop      , "fop"       , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_ip       , "fioff"     , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_cs       , "fiseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_dp       , "fooff"     , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_ds       , "foseg"     , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_mxcsr    , "mxcsr"     , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },

{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0, INVALID_NUB_REGNUM, gdb_stmm0, NULL, NULL },
{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1, INVALID_NUB_REGNUM, gdb_stmm1, NULL, NULL },
{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, INVALID_NUB_REGNUM, gdb_stmm2, NULL, NULL },
{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, INVALID_NUB_REGNUM, gdb_stmm3, NULL, NULL },
{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, INVALID_NUB_REGNUM, gdb_stmm4, NULL, NULL },
{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, INVALID_NUB_REGNUM, gdb_stmm5, NULL, NULL },
{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, INVALID_NUB_REGNUM, gdb_stmm6, NULL, NULL },
{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, INVALID_NUB_REGNUM, gdb_stmm7, NULL, NULL },

{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), AVX_OFFSET(xmm0), INVALID_NUB_REGNUM, dwarf_xmm0, INVALID_NUB_REGNUM, gdb_xmm0, NULL, NULL },
{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), AVX_OFFSET(xmm1), INVALID_NUB_REGNUM, dwarf_xmm1, INVALID_NUB_REGNUM, gdb_xmm1, NULL, NULL },
{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), AVX_OFFSET(xmm2), INVALID_NUB_REGNUM, dwarf_xmm2, INVALID_NUB_REGNUM, gdb_xmm2, NULL, NULL },
{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), AVX_OFFSET(xmm3), INVALID_NUB_REGNUM, dwarf_xmm3, INVALID_NUB_REGNUM, gdb_xmm3, NULL, NULL },
{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), AVX_OFFSET(xmm4), INVALID_NUB_REGNUM, dwarf_xmm4, INVALID_NUB_REGNUM, gdb_xmm4, NULL, NULL },
{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), AVX_OFFSET(xmm5), INVALID_NUB_REGNUM, dwarf_xmm5, INVALID_NUB_REGNUM, gdb_xmm5, NULL, NULL },
{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), AVX_OFFSET(xmm6), INVALID_NUB_REGNUM, dwarf_xmm6, INVALID_NUB_REGNUM, gdb_xmm6, NULL, NULL },
{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), AVX_OFFSET(xmm7), INVALID_NUB_REGNUM, dwarf_xmm7, INVALID_NUB_REGNUM, gdb_xmm7, NULL, NULL },

{ e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), INVALID_NUB_REGNUM, dwarf_ymm0, INVALID_NUB_REGNUM, gdb_ymm0, NULL, NULL },
{ e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), INVALID_NUB_REGNUM, dwarf_ymm1, INVALID_NUB_REGNUM, gdb_ymm1, NULL, NULL },
{ e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), INVALID_NUB_REGNUM, dwarf_ymm2, INVALID_NUB_REGNUM, gdb_ymm2, NULL, NULL },
{ e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), INVALID_NUB_REGNUM, dwarf_ymm3, INVALID_NUB_REGNUM, gdb_ymm3, NULL, NULL },
{ e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), INVALID_NUB_REGNUM, dwarf_ymm4, INVALID_NUB_REGNUM, gdb_ymm4, NULL, NULL },
{ e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), INVALID_NUB_REGNUM, dwarf_ymm5, INVALID_NUB_REGNUM, gdb_ymm5, NULL, NULL },
{ e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), INVALID_NUB_REGNUM, dwarf_ymm6, INVALID_NUB_REGNUM, gdb_ymm6, NULL, NULL },
{ e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), INVALID_NUB_REGNUM, dwarf_ymm7, INVALID_NUB_REGNUM, gdb_ymm7, NULL, NULL }
};

// Exception state registers.
const DNBRegisterInfo
DNBArchImplI386::g_exc_registers[] =
{
{ e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)     , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)        , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
{ e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL }
};

// Number of registers in each register set
const size_t DNBArchImplI386::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
const size_t DNBArchImplI386::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;

//----------------------------------------------------------------------
// Register set definitions. The first definitions at register set index
// of zero is for all registers, followed by other registers sets. The
// register information for the all register set need not be filled in.
1205//---------------------------------------------------------------------- 1206const DNBRegisterSetInfo 1207DNBArchImplI386::g_reg_sets_no_avx[] = 1208{ 1209 { "i386 Registers", NULL, k_num_all_registers_no_avx }, 1210 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 1211 { "Floating Point Registers", g_fpu_registers_no_avx, k_num_fpu_registers_no_avx }, 1212 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 1213}; 1214 1215const DNBRegisterSetInfo 1216DNBArchImplI386::g_reg_sets_avx[] = 1217{ 1218 { "i386 Registers", NULL, k_num_all_registers_avx }, 1219 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 1220 { "Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx }, 1221 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 1222}; 1223 1224// Total number of register sets for this architecture 1225const size_t DNBArchImplI386::k_num_register_sets = sizeof(g_reg_sets_no_avx)/sizeof(DNBRegisterSetInfo); 1226 1227DNBArchProtocol * 1228DNBArchImplI386::Create (MachThread *thread) 1229{ 1230 DNBArchImplI386 *obj = new DNBArchImplI386 (thread); 1231 1232 // When new thread comes along, it tries to inherit from the global debug state, if it is valid. 
1233 if (Valid_Global_Debug_State) 1234 { 1235 obj->m_state.context.dbg = Global_Debug_State; 1236 kern_return_t kret = obj->SetDBGState(); 1237 DNBLogThreadedIf(LOG_WATCHPOINTS, 1238 "DNBArchImplX86_64::Create() Inherit and SetDBGState() => 0x%8.8x.", kret); 1239 } 1240 return obj; 1241} 1242 1243const uint8_t * const 1244DNBArchImplI386::SoftwareBreakpointOpcode (nub_size_t byte_size) 1245{ 1246 static const uint8_t g_breakpoint_opcode[] = { 0xCC }; 1247 if (byte_size == 1) 1248 return g_breakpoint_opcode; 1249 return NULL; 1250} 1251 1252const DNBRegisterSetInfo * 1253DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets) 1254{ 1255 *num_reg_sets = k_num_register_sets; 1256 if (CPUHasAVX() || FORCE_AVX_REGS) 1257 return g_reg_sets_avx; 1258 else 1259 return g_reg_sets_no_avx; 1260} 1261 1262 1263void 1264DNBArchImplI386::Initialize() 1265{ 1266 DNBArchPluginInfo arch_plugin_info = 1267 { 1268 CPU_TYPE_I386, 1269 DNBArchImplI386::Create, 1270 DNBArchImplI386::GetRegisterSetInfo, 1271 DNBArchImplI386::SoftwareBreakpointOpcode 1272 }; 1273 1274 // Register this arch plug-in with the main protocol class 1275 DNBArchProtocol::RegisterArchPlugin (arch_plugin_info); 1276} 1277 1278bool 1279DNBArchImplI386::GetRegisterValue(int set, int reg, DNBRegisterValue *value) 1280{ 1281 if (set == REGISTER_SET_GENERIC) 1282 { 1283 switch (reg) 1284 { 1285 case GENERIC_REGNUM_PC: // Program Counter 1286 set = e_regSetGPR; 1287 reg = gpr_eip; 1288 break; 1289 1290 case GENERIC_REGNUM_SP: // Stack Pointer 1291 set = e_regSetGPR; 1292 reg = gpr_esp; 1293 break; 1294 1295 case GENERIC_REGNUM_FP: // Frame Pointer 1296 set = e_regSetGPR; 1297 reg = gpr_ebp; 1298 break; 1299 1300 case GENERIC_REGNUM_FLAGS: // Processor flags register 1301 set = e_regSetGPR; 1302 reg = gpr_eflags; 1303 break; 1304 1305 case GENERIC_REGNUM_RA: // Return Address 1306 default: 1307 return false; 1308 } 1309 } 1310 1311 if (GetRegisterState(set, false) != KERN_SUCCESS) 1312 return false; 1313 1314 
const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1315 if (regInfo) 1316 { 1317 value->info = *regInfo; 1318 switch (set) 1319 { 1320 case e_regSetGPR: 1321 if (reg < k_num_gpr_registers) 1322 { 1323 value->value.uint32 = ((uint32_t*)(&m_state.context.gpr))[reg]; 1324 return true; 1325 } 1326 break; 1327 1328 case e_regSetFPU: 1329 if (CPUHasAVX() || FORCE_AVX_REGS) 1330 { 1331 switch (reg) 1332 { 1333 case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)); return true; 1334 case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)); return true; 1335 case fpu_ftw: value->value.uint8 = m_state.context.fpu.avx.__fpu_ftw; return true; 1336 case fpu_fop: value->value.uint16 = m_state.context.fpu.avx.__fpu_fop; return true; 1337 case fpu_ip: value->value.uint32 = m_state.context.fpu.avx.__fpu_ip; return true; 1338 case fpu_cs: value->value.uint16 = m_state.context.fpu.avx.__fpu_cs; return true; 1339 case fpu_dp: value->value.uint32 = m_state.context.fpu.avx.__fpu_dp; return true; 1340 case fpu_ds: value->value.uint16 = m_state.context.fpu.avx.__fpu_ds; return true; 1341 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr; return true; 1342 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask; return true; 1343 1344 case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, 10); return true; 1345 case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, 10); return true; 1346 case fpu_stmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, 10); return true; 1347 case fpu_stmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, 10); return true; 1348 case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, 10); return true; 1349 case fpu_stmm5: memcpy(&value->value.uint8, 
m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 10); return true; 1350 case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, 10); return true; 1351 case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, 10); return true; 1352 1353 case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, 16); return true; 1354 case fpu_xmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, 16); return true; 1355 case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, 16); return true; 1356 case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, 16); return true; 1357 case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, 16); return true; 1358 case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, 16); return true; 1359 case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, 16); return true; 1360 case fpu_xmm7: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, 16); return true; 1361 1362#define MEMCPY_YMM(n) \ 1363 memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, 16); \ 1364 memcpy((&value->value.uint8) + 16, m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16); 1365 case fpu_ymm0: MEMCPY_YMM(0); return true; 1366 case fpu_ymm1: MEMCPY_YMM(1); return true; 1367 case fpu_ymm2: MEMCPY_YMM(2); return true; 1368 case fpu_ymm3: MEMCPY_YMM(3); return true; 1369 case fpu_ymm4: MEMCPY_YMM(4); return true; 1370 case fpu_ymm5: MEMCPY_YMM(5); return true; 1371 case fpu_ymm6: MEMCPY_YMM(6); return true; 1372 case fpu_ymm7: MEMCPY_YMM(7); return true; 1373#undef MEMCPY_YMM 1374 } 1375 } 1376 else 1377 { 1378 switch (reg) 1379 { 1380 case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); return true; 1381 case 
fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); return true; 1382 case fpu_ftw: value->value.uint8 = m_state.context.fpu.no_avx.__fpu_ftw; return true; 1383 case fpu_fop: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; return true; 1384 case fpu_ip: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; return true; 1385 case fpu_cs: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; return true; 1386 case fpu_dp: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; return true; 1387 case fpu_ds: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; return true; 1388 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; return true; 1389 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; return true; 1390 1391 case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10); return true; 1392 case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10); return true; 1393 case fpu_stmm2: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10); return true; 1394 case fpu_stmm3: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10); return true; 1395 case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10); return true; 1396 case fpu_stmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10); return true; 1397 case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10); return true; 1398 case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10); return true; 1399 1400 case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16); return true; 1401 case fpu_xmm1: memcpy(&value->value.uint8, 
m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16); return true; 1402 case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16); return true; 1403 case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16); return true; 1404 case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16); return true; 1405 case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16); return true; 1406 case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16); return true; 1407 case fpu_xmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16); return true; 1408 } 1409 } 1410 break; 1411 1412 case e_regSetEXC: 1413 if (reg < k_num_exc_registers) 1414 { 1415 value->value.uint32 = (&m_state.context.exc.__trapno)[reg]; 1416 return true; 1417 } 1418 break; 1419 } 1420 } 1421 return false; 1422} 1423 1424 1425bool 1426DNBArchImplI386::SetRegisterValue(int set, int reg, const DNBRegisterValue *value) 1427{ 1428 if (set == REGISTER_SET_GENERIC) 1429 { 1430 switch (reg) 1431 { 1432 case GENERIC_REGNUM_PC: // Program Counter 1433 set = e_regSetGPR; 1434 reg = gpr_eip; 1435 break; 1436 1437 case GENERIC_REGNUM_SP: // Stack Pointer 1438 set = e_regSetGPR; 1439 reg = gpr_esp; 1440 break; 1441 1442 case GENERIC_REGNUM_FP: // Frame Pointer 1443 set = e_regSetGPR; 1444 reg = gpr_ebp; 1445 break; 1446 1447 case GENERIC_REGNUM_FLAGS: // Processor flags register 1448 set = e_regSetGPR; 1449 reg = gpr_eflags; 1450 break; 1451 1452 case GENERIC_REGNUM_RA: // Return Address 1453 default: 1454 return false; 1455 } 1456 } 1457 1458 if (GetRegisterState(set, false) != KERN_SUCCESS) 1459 return false; 1460 1461 bool success = false; 1462 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1463 if (regInfo) 1464 { 1465 switch (set) 1466 { 1467 case e_regSetGPR: 1468 if (reg < 
k_num_gpr_registers) 1469 { 1470 ((uint32_t*)(&m_state.context.gpr))[reg] = value->value.uint32; 1471 success = true; 1472 } 1473 break; 1474 1475 case e_regSetFPU: 1476 if (CPUHasAVX() || FORCE_AVX_REGS) 1477 { 1478 switch (reg) 1479 { 1480 case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1481 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1482 case fpu_ftw: m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; success = true; break; 1483 case fpu_fop: m_state.context.fpu.avx.__fpu_fop = value->value.uint16; success = true; break; 1484 case fpu_ip: m_state.context.fpu.avx.__fpu_ip = value->value.uint32; success = true; break; 1485 case fpu_cs: m_state.context.fpu.avx.__fpu_cs = value->value.uint16; success = true; break; 1486 case fpu_dp: m_state.context.fpu.avx.__fpu_dp = value->value.uint32; success = true; break; 1487 case fpu_ds: m_state.context.fpu.avx.__fpu_ds = value->value.uint16; success = true; break; 1488 case fpu_mxcsr: m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1489 case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; success = true; break; 1490 1491 case fpu_stmm0: memcpy (m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break; 1492 case fpu_stmm1: memcpy (m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = true; break; 1493 case fpu_stmm2: memcpy (m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break; 1494 case fpu_stmm3: memcpy (m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break; 1495 case fpu_stmm4: memcpy (m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break; 1496 case fpu_stmm5: memcpy (m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 
&value->value.uint8, 10); success = true; break; 1497 case fpu_stmm6: memcpy (m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break; 1498 case fpu_stmm7: memcpy (m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break; 1499 1500 case fpu_xmm0: memcpy(m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break; 1501 case fpu_xmm1: memcpy(m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break; 1502 case fpu_xmm2: memcpy(m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break; 1503 case fpu_xmm3: memcpy(m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break; 1504 case fpu_xmm4: memcpy(m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break; 1505 case fpu_xmm5: memcpy(m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break; 1506 case fpu_xmm6: memcpy(m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break; 1507 case fpu_xmm7: memcpy(m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; break; 1508 1509#define MEMCPY_YMM(n) \ 1510 memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, 16); \ 1511 memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, (&value->value.uint8) + 16, 16); 1512 case fpu_ymm0: MEMCPY_YMM(0); return true; 1513 case fpu_ymm1: MEMCPY_YMM(1); return true; 1514 case fpu_ymm2: MEMCPY_YMM(2); return true; 1515 case fpu_ymm3: MEMCPY_YMM(3); return true; 1516 case fpu_ymm4: MEMCPY_YMM(4); return true; 1517 case fpu_ymm5: MEMCPY_YMM(5); return true; 1518 case fpu_ymm6: MEMCPY_YMM(6); return true; 1519 case fpu_ymm7: MEMCPY_YMM(7); return true; 1520#undef MEMCPY_YMM 1521 } 1522 } 1523 else 1524 { 1525 switch (reg) 1526 { 1527 case fpu_fcw: *((uint16_t 
*)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1528 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1529 case fpu_ftw: m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8; success = true; break; 1530 case fpu_fop: m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; success = true; break; 1531 case fpu_ip: m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; success = true; break; 1532 case fpu_cs: m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; success = true; break; 1533 case fpu_dp: m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; success = true; break; 1534 case fpu_ds: m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; success = true; break; 1535 case fpu_mxcsr: m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1536 case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; success = true; break; 1537 1538 case fpu_stmm0: memcpy (m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break; 1539 case fpu_stmm1: memcpy (m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = true; break; 1540 case fpu_stmm2: memcpy (m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break; 1541 case fpu_stmm3: memcpy (m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break; 1542 case fpu_stmm4: memcpy (m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break; 1543 case fpu_stmm5: memcpy (m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10); success = true; break; 1544 case fpu_stmm6: memcpy (m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break; 1545 case fpu_stmm7: memcpy 
(m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break; 1546 1547 case fpu_xmm0: memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break; 1548 case fpu_xmm1: memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break; 1549 case fpu_xmm2: memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break; 1550 case fpu_xmm3: memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break; 1551 case fpu_xmm4: memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break; 1552 case fpu_xmm5: memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break; 1553 case fpu_xmm6: memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break; 1554 case fpu_xmm7: memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; break; 1555 } 1556 } 1557 break; 1558 1559 case e_regSetEXC: 1560 if (reg < k_num_exc_registers) 1561 { 1562 (&m_state.context.exc.__trapno)[reg] = value->value.uint32; 1563 success = true; 1564 } 1565 break; 1566 } 1567 } 1568 1569 if (success) 1570 return SetRegisterState(set) == KERN_SUCCESS; 1571 return false; 1572} 1573 1574 1575nub_size_t 1576DNBArchImplI386::GetRegisterContext (void *buf, nub_size_t buf_len) 1577{ 1578 nub_size_t size = sizeof (m_state.context); 1579 1580 if (buf && buf_len) 1581 { 1582 if (size > buf_len) 1583 size = buf_len; 1584 1585 bool force = false; 1586 kern_return_t kret; 1587 if ((kret = GetGPRState(force)) != KERN_SUCCESS) 1588 { 1589 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to read: %u ", buf, (uint64_t)buf_len, kret); 1590 size = 0; 1591 } 1592 else if ((kret = GetFPUState(force)) 
!= KERN_SUCCESS) 1593 { 1594 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) error: %s regs failed to read: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret); 1595 size = 0; 1596 } 1597 else if ((kret = GetEXCState(force)) != KERN_SUCCESS) 1598 { 1599 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) error: EXC regs failed to read: %u", buf, (uint64_t)buf_len, kret); 1600 size = 0; 1601 } 1602 else 1603 { 1604 // Success 1605 ::memcpy (buf, &m_state.context, size); 1606 } 1607 } 1608 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size); 1609 // Return the size of the register context even if NULL was passed in 1610 return size; 1611} 1612 1613nub_size_t 1614DNBArchImplI386::SetRegisterContext (const void *buf, nub_size_t buf_len) 1615{ 1616 nub_size_t size = sizeof (m_state.context); 1617 if (buf == NULL || buf_len == 0) 1618 size = 0; 1619 1620 if (size) 1621 { 1622 if (size > buf_len) 1623 size = buf_len; 1624 1625 ::memcpy (&m_state.context, buf, size); 1626 kern_return_t kret; 1627 if ((kret = SetGPRState()) != KERN_SUCCESS) 1628 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to write: %u", buf, (uint64_t)buf_len, kret); 1629 if ((kret = SetFPUState()) != KERN_SUCCESS) 1630 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) error: %s regs failed to write: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? 
"AVX" : "FPU", kret); 1631 if ((kret = SetEXCState()) != KERN_SUCCESS) 1632 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) error: EXP regs failed to write: %u", buf, (uint64_t)buf_len, kret); 1633 } 1634 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size); 1635 return size; 1636} 1637 1638 1639 1640kern_return_t 1641DNBArchImplI386::GetRegisterState(int set, bool force) 1642{ 1643 switch (set) 1644 { 1645 case e_regSetALL: return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 1646 case e_regSetGPR: return GetGPRState(force); 1647 case e_regSetFPU: return GetFPUState(force); 1648 case e_regSetEXC: return GetEXCState(force); 1649 default: break; 1650 } 1651 return KERN_INVALID_ARGUMENT; 1652} 1653 1654kern_return_t 1655DNBArchImplI386::SetRegisterState(int set) 1656{ 1657 // Make sure we have a valid context to set. 1658 if (RegisterSetStateIsValid(set)) 1659 { 1660 switch (set) 1661 { 1662 case e_regSetALL: return SetGPRState() | SetFPUState() | SetEXCState(); 1663 case e_regSetGPR: return SetGPRState(); 1664 case e_regSetFPU: return SetFPUState(); 1665 case e_regSetEXC: return SetEXCState(); 1666 default: break; 1667 } 1668 } 1669 return KERN_INVALID_ARGUMENT; 1670} 1671 1672bool 1673DNBArchImplI386::RegisterSetStateIsValid (int set) const 1674{ 1675 return m_state.RegsAreValid(set); 1676} 1677 1678#endif // #if defined (__i386__) 1679