DNBArchImplX86_64.cpp revision 0e8147bd867e4cdaae9400f56d02c7aacd40a9b3
1//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// Created by Greg Clayton on 6/25/07. 11// 12//===----------------------------------------------------------------------===// 13 14#if defined (__i386__) || defined (__x86_64__) 15 16#include <sys/cdefs.h> 17 18#include "MacOSX/x86_64/DNBArchImplX86_64.h" 19#include "DNBLog.h" 20#include "MachThread.h" 21#include "MachProcess.h" 22#include <mach/mach.h> 23#include <stdlib.h> 24 25#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG) 26enum debugState { 27 debugStateUnknown, 28 debugStateOff, 29 debugStateOn 30}; 31 32static debugState sFPUDebugState = debugStateUnknown; 33static debugState sAVXForceState = debugStateUnknown; 34 35static bool DebugFPURegs () 36{ 37 if (sFPUDebugState == debugStateUnknown) 38 { 39 if (getenv("DNB_DEBUG_FPU_REGS")) 40 sFPUDebugState = debugStateOn; 41 else 42 sFPUDebugState = debugStateOff; 43 } 44 45 return (sFPUDebugState == debugStateOn); 46} 47 48static bool ForceAVXRegs () 49{ 50 if (sFPUDebugState == debugStateUnknown) 51 { 52 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS")) 53 sAVXForceState = debugStateOn; 54 else 55 sAVXForceState = debugStateOff; 56 } 57 58 return (sAVXForceState == debugStateOn); 59} 60 61#define DEBUG_FPU_REGS (DebugFPURegs()) 62#define FORCE_AVX_REGS (ForceAVXRegs()) 63#else 64#define DEBUG_FPU_REGS (0) 65#define FORCE_AVX_REGS (0) 66#endif 67 68enum DNBArchImplX86_64::AVXPresence DNBArchImplX86_64::s_has_avx = DNBArchImplX86_64::kAVXNotPresent; 69 70uint64_t 71DNBArchImplX86_64::GetPC(uint64_t failValue) 72{ 73 // Get program counter 74 if (GetGPRState(false) == KERN_SUCCESS) 75 return m_state.context.gpr.__rip; 76 return failValue; 77} 78 79kern_return_t 
80DNBArchImplX86_64::SetPC(uint64_t value) 81{ 82 // Get program counter 83 kern_return_t err = GetGPRState(false); 84 if (err == KERN_SUCCESS) 85 { 86 m_state.context.gpr.__rip = value; 87 err = SetGPRState(); 88 } 89 return err == KERN_SUCCESS; 90} 91 92uint64_t 93DNBArchImplX86_64::GetSP(uint64_t failValue) 94{ 95 // Get stack pointer 96 if (GetGPRState(false) == KERN_SUCCESS) 97 return m_state.context.gpr.__rsp; 98 return failValue; 99} 100 101// Uncomment the value below to verify the values in the debugger. 102//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 103 104kern_return_t 105DNBArchImplX86_64::GetGPRState(bool force) 106{ 107 if (force || m_state.GetError(e_regSetGPR, Read)) 108 { 109 kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID()); 110 DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount()); 111 112#if DEBUG_GPR_VALUES 113 m_state.context.gpr.__rax = ('a' << 8) + 'x'; 114 m_state.context.gpr.__rbx = ('b' << 8) + 'x'; 115 m_state.context.gpr.__rcx = ('c' << 8) + 'x'; 116 m_state.context.gpr.__rdx = ('d' << 8) + 'x'; 117 m_state.context.gpr.__rdi = ('d' << 8) + 'i'; 118 m_state.context.gpr.__rsi = ('s' << 8) + 'i'; 119 m_state.context.gpr.__rbp = ('b' << 8) + 'p'; 120 m_state.context.gpr.__rsp = ('s' << 8) + 'p'; 121 m_state.context.gpr.__r8 = ('r' << 8) + '8'; 122 m_state.context.gpr.__r9 = ('r' << 8) + '9'; 123 m_state.context.gpr.__r10 = ('r' << 8) + 'a'; 124 m_state.context.gpr.__r11 = ('r' << 8) + 'b'; 125 m_state.context.gpr.__r12 = ('r' << 8) + 'c'; 126 m_state.context.gpr.__r13 = ('r' << 8) + 'd'; 127 m_state.context.gpr.__r14 = ('r' << 8) + 'e'; 128 m_state.context.gpr.__r15 = ('r' << 8) + 'f'; 129 m_state.context.gpr.__rip = ('i' << 8) + 'p'; 130 m_state.context.gpr.__rflags = ('f' << 8) + 'l'; 131 m_state.context.gpr.__cs = ('c' << 8) + 's'; 132 m_state.context.gpr.__fs = 
('f' << 8) + 's'; 133 m_state.context.gpr.__gs = ('g' << 8) + 's'; 134 m_state.SetError(e_regSetGPR, Read, 0); 135#else 136 mach_msg_type_number_t count = e_regSetWordSizeGPR; 137 m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count)); 138 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 139 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx" 140 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx" 141 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx" 142 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx" 143 "\n\trip = %16.16llx" 144 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx", 145 m_thread->ThreadID(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT, 146 m_state.GetError(e_regSetGPR, Read), 147 m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx, 148 m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi, 149 m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8, 150 m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11, 151 m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14, 152 m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags, 153 m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs); 154 155 // DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 156 // "\n\trax = %16.16llx" 157 // "\n\trbx = %16.16llx" 158 // "\n\trcx = %16.16llx" 159 // "\n\trdx = %16.16llx" 160 // "\n\trdi = %16.16llx" 161 // "\n\trsi = %16.16llx" 162 // "\n\trbp = %16.16llx" 163 // "\n\trsp = %16.16llx" 164 // "\n\t r8 = %16.16llx" 165 // "\n\t r9 = %16.16llx" 166 // "\n\tr10 = %16.16llx" 167 // "\n\tr11 = %16.16llx" 168 // "\n\tr12 = %16.16llx" 169 // "\n\tr13 = 
%16.16llx" 170 // "\n\tr14 = %16.16llx" 171 // "\n\tr15 = %16.16llx" 172 // "\n\trip = %16.16llx" 173 // "\n\tflg = %16.16llx" 174 // "\n\t cs = %16.16llx" 175 // "\n\t fs = %16.16llx" 176 // "\n\t gs = %16.16llx", 177 // m_thread->ThreadID(), 178 // x86_THREAD_STATE64, 179 // x86_THREAD_STATE64_COUNT, 180 // m_state.GetError(e_regSetGPR, Read), 181 // m_state.context.gpr.__rax, 182 // m_state.context.gpr.__rbx, 183 // m_state.context.gpr.__rcx, 184 // m_state.context.gpr.__rdx, 185 // m_state.context.gpr.__rdi, 186 // m_state.context.gpr.__rsi, 187 // m_state.context.gpr.__rbp, 188 // m_state.context.gpr.__rsp, 189 // m_state.context.gpr.__r8, 190 // m_state.context.gpr.__r9, 191 // m_state.context.gpr.__r10, 192 // m_state.context.gpr.__r11, 193 // m_state.context.gpr.__r12, 194 // m_state.context.gpr.__r13, 195 // m_state.context.gpr.__r14, 196 // m_state.context.gpr.__r15, 197 // m_state.context.gpr.__rip, 198 // m_state.context.gpr.__rflags, 199 // m_state.context.gpr.__cs, 200 // m_state.context.gpr.__fs, 201 // m_state.context.gpr.__gs); 202#endif 203 } 204 return m_state.GetError(e_regSetGPR, Read); 205} 206 207// Uncomment the value below to verify the values in the debugger. 
208//#define DEBUG_FPU_REGS 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 209 210kern_return_t 211DNBArchImplX86_64::GetFPUState(bool force) 212{ 213 if (force || m_state.GetError(e_regSetFPU, Read)) 214 { 215 if (DEBUG_FPU_REGS) { 216 if (CPUHasAVX() || FORCE_AVX_REGS) 217 { 218 m_state.context.fpu.avx.__fpu_reserved[0] = -1; 219 m_state.context.fpu.avx.__fpu_reserved[1] = -1; 220 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234; 221 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678; 222 m_state.context.fpu.avx.__fpu_ftw = 1; 223 m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX; 224 m_state.context.fpu.avx.__fpu_fop = 2; 225 m_state.context.fpu.avx.__fpu_ip = 3; 226 m_state.context.fpu.avx.__fpu_cs = 4; 227 m_state.context.fpu.avx.__fpu_rsrv2 = 5; 228 m_state.context.fpu.avx.__fpu_dp = 6; 229 m_state.context.fpu.avx.__fpu_ds = 7; 230 m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX; 231 m_state.context.fpu.avx.__fpu_mxcsr = 8; 232 m_state.context.fpu.avx.__fpu_mxcsrmask = 9; 233 int i; 234 for (i=0; i<16; ++i) 235 { 236 if (i<10) 237 { 238 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 239 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 240 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 241 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 242 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 243 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 244 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 245 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 246 } 247 else 248 { 249 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 250 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 251 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 252 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 253 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 254 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 255 
m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 256 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 257 } 258 259 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0'; 260 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1'; 261 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2'; 262 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3'; 263 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4'; 264 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5'; 265 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6'; 266 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7'; 267 m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8'; 268 m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9'; 269 m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A'; 270 m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B'; 271 m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C'; 272 m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D'; 273 m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E'; 274 m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F'; 275 276 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0'; 277 m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1'; 278 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2'; 279 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3'; 280 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4'; 281 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5'; 282 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6'; 283 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7'; 284 m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8'; 285 m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9'; 286 m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A'; 287 m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B'; 288 m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C'; 289 m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D'; 290 m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E'; 
291 m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F'; 292 } 293 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 294 m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN; 295 m_state.context.fpu.avx.__fpu_reserved1 = -1; 296 for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i) 297 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 298 m_state.SetError(e_regSetFPU, Read, 0); 299 } 300 else 301 { 302 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 303 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 304 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 305 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 306 m_state.context.fpu.no_avx.__fpu_ftw = 1; 307 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 308 m_state.context.fpu.no_avx.__fpu_fop = 2; 309 m_state.context.fpu.no_avx.__fpu_ip = 3; 310 m_state.context.fpu.no_avx.__fpu_cs = 4; 311 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 312 m_state.context.fpu.no_avx.__fpu_dp = 6; 313 m_state.context.fpu.no_avx.__fpu_ds = 7; 314 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 315 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 316 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 317 int i; 318 for (i=0; i<16; ++i) 319 { 320 if (i<10) 321 { 322 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 323 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 324 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 325 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 326 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 327 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 328 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 329 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 330 } 331 else 332 { 333 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 334 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 335 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] 
= INT8_MIN; 336 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 337 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 338 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 339 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 340 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 341 } 342 343 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 344 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 345 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 346 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 347 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 348 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 349 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 350 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 351 m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8'; 352 m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9'; 353 m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A'; 354 m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B'; 355 m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C'; 356 m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D'; 357 m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E'; 358 m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F'; 359 } 360 for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i) 361 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 362 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 363 m_state.SetError(e_regSetFPU, Read, 0); 364 } 365 } 366 else 367 { 368 if (CPUHasAVX() || FORCE_AVX_REGS) 369 { 370 mach_msg_type_number_t count = e_regSetWordSizeAVX; 371 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count)); 372 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in) carp) => 0x%8.8x", 373 
m_thread->ThreadID(), __x86_64_AVX_STATE, (uint32_t)count, 374 e_regSetWordSizeAVX, m_state.GetError(e_regSetFPU, Read)); 375 } 376 else 377 { 378 mach_msg_type_number_t count = e_regSetWordSizeFPU; 379 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count)); 380 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in) => 0x%8.8x", 381 m_thread->ThreadID(), __x86_64_FLOAT_STATE, (uint32_t)count, 382 e_regSetWordSizeFPU, m_state.GetError(e_regSetFPU, Read)); 383 } 384 } 385 } 386 return m_state.GetError(e_regSetFPU, Read); 387} 388 389kern_return_t 390DNBArchImplX86_64::GetEXCState(bool force) 391{ 392 if (force || m_state.GetError(e_regSetEXC, Read)) 393 { 394 mach_msg_type_number_t count = e_regSetWordSizeEXC; 395 m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count)); 396 } 397 return m_state.GetError(e_regSetEXC, Read); 398} 399 400kern_return_t 401DNBArchImplX86_64::SetGPRState() 402{ 403 kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID()); 404 DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount()); 405 406 m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR)); 407 DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 408 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx" 409 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx" 410 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx" 411 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx" 412 "\n\trip = %16.16llx" 413 "\n\tflg = %16.16llx cs = 
%16.16llx fs = %16.16llx gs = %16.16llx", 414 m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR, 415 m_state.GetError(e_regSetGPR, Write), 416 m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx, 417 m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi, 418 m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8, 419 m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11, 420 m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14, 421 m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags, 422 m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs); 423 return m_state.GetError(e_regSetGPR, Write); 424} 425 426kern_return_t 427DNBArchImplX86_64::SetFPUState() 428{ 429 if (DEBUG_FPU_REGS) 430 { 431 m_state.SetError(e_regSetFPU, Write, 0); 432 return m_state.GetError(e_regSetFPU, Write); 433 } 434 else 435 { 436 if (CPUHasAVX() || FORCE_AVX_REGS) 437 { 438 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX)); 439 return m_state.GetError(e_regSetFPU, Write); 440 } 441 else 442 { 443 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU)); 444 return m_state.GetError(e_regSetFPU, Write); 445 } 446 } 447} 448 449kern_return_t 450DNBArchImplX86_64::SetEXCState() 451{ 452 m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC)); 453 return m_state.GetError(e_regSetEXC, Write); 454} 455 456kern_return_t 457DNBArchImplX86_64::GetDBGState(bool force) 458{ 459 if (force || m_state.GetError(e_regSetDBG, Read)) 460 { 461 mach_msg_type_number_t count = e_regSetWordSizeDBG; 462 
m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count)); 463 } 464 return m_state.GetError(e_regSetDBG, Read); 465} 466 467kern_return_t 468DNBArchImplX86_64::SetDBGState() 469{ 470 m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG)); 471 return m_state.GetError(e_regSetDBG, Write); 472} 473 474void 475DNBArchImplX86_64::ThreadWillResume() 476{ 477 // Do we need to step this thread? If so, let the mach thread tell us so. 478 if (m_thread->IsStepping()) 479 { 480 // This is the primary thread, let the arch do anything it needs 481 EnableHardwareSingleStep(true); 482 } 483 484 // Reset the debug status register, if necessary, before we resume. 485 kern_return_t kret = GetDBGState(false); 486 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret); 487 if (kret != KERN_SUCCESS) 488 return; 489 490 DBG &debug_state = m_state.context.dbg; 491 bool need_reset = false; 492 uint32_t i, num = NumSupportedHardwareWatchpoints(); 493 for (i = 0; i < num; ++i) 494 if (IsWatchpointHit(debug_state, i)) 495 need_reset = true; 496 497 if (need_reset) 498 { 499 ClearWatchpointHits(debug_state); 500 kret = SetDBGState(); 501 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret); 502 } 503} 504 505bool 506DNBArchImplX86_64::ThreadDidStop() 507{ 508 bool success = true; 509 510 m_state.InvalidateAllRegisterStates(); 511 512 // Are we stepping a single instruction? 513 if (GetGPRState(true) == KERN_SUCCESS) 514 { 515 // We are single stepping, was this the primary thread? 516 if (m_thread->IsStepping()) 517 { 518 // This was the primary thread, we need to clear the trace 519 // bit if so. 
520 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 521 } 522 else 523 { 524 // The MachThread will automatically restore the suspend count 525 // in ThreadDidStop(), so we don't need to do anything here if 526 // we weren't the primary thread the last time 527 } 528 } 529 return success; 530} 531 532bool 533DNBArchImplX86_64::NotifyException(MachException::Data& exc) 534{ 535 switch (exc.exc_type) 536 { 537 case EXC_BAD_ACCESS: 538 break; 539 case EXC_BAD_INSTRUCTION: 540 break; 541 case EXC_ARITHMETIC: 542 break; 543 case EXC_EMULATION: 544 break; 545 case EXC_SOFTWARE: 546 break; 547 case EXC_BREAKPOINT: 548 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) 549 { 550 // exc_code = EXC_I386_BPT 551 // 552 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS); 553 if (pc != INVALID_NUB_ADDRESS && pc > 0) 554 { 555 pc -= 1; 556 // Check for a breakpoint at one byte prior to the current PC value 557 // since the PC will be just past the trap. 558 559 nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc); 560 if (NUB_BREAK_ID_IS_VALID(breakID)) 561 { 562 // Backup the PC for i386 since the trap was taken and the PC 563 // is at the address following the single byte trap instruction. 564 if (m_state.context.gpr.__rip > 0) 565 { 566 m_state.context.gpr.__rip = pc; 567 // Write the new PC back out 568 SetGPRState (); 569 } 570 } 571 return true; 572 } 573 } 574 else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1) 575 { 576 // exc_code = EXC_I386_SGL 577 // 578 // Check whether this corresponds to a watchpoint hit event. 579 // If yes, set the exc_sub_code to the data break address. 580 nub_addr_t addr = 0; 581 uint32_t hw_index = GetHardwareWatchpointHit(addr); 582 if (hw_index != INVALID_NUB_HW_INDEX) 583 { 584 exc.exc_data[1] = addr; 585 // Piggyback the hw_index in the exc.data. 
586 exc.exc_data.push_back(hw_index); 587 } 588 589 return true; 590 } 591 break; 592 case EXC_SYSCALL: 593 break; 594 case EXC_MACH_SYSCALL: 595 break; 596 case EXC_RPC_ALERT: 597 break; 598 } 599 return false; 600} 601 602uint32_t 603DNBArchImplX86_64::NumSupportedHardwareWatchpoints() 604{ 605 // Available debug address registers: dr0, dr1, dr2, dr3. 606 return 4; 607} 608 609static uint32_t 610size_and_rw_bits(nub_size_t size, bool read, bool write) 611{ 612 uint32_t rw; 613 if (read) { 614 rw = 0x3; // READ or READ/WRITE 615 } else if (write) { 616 rw = 0x1; // WRITE 617 } else { 618 assert(0 && "read and write cannot both be false"); 619 } 620 621 switch (size) { 622 case 1: 623 return rw; 624 case 2: 625 return (0x1 << 2) | rw; 626 case 4: 627 return (0x3 << 2) | rw; 628 case 8: 629 return (0x2 << 2) | rw; 630 default: 631 assert(0 && "invalid size, must be one of 1, 2, 4, or 8"); 632 } 633} 634void 635DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write) 636{ 637 // Set both dr7 (debug control register) and dri (debug address register). 638 639 // dr7{7-0} encodes the local/gloabl enable bits: 640 // global enable --. 
.-- local enable 641 // | | 642 // v v 643 // dr0 -> bits{1-0} 644 // dr1 -> bits{3-2} 645 // dr2 -> bits{5-4} 646 // dr3 -> bits{7-6} 647 // 648 // dr7{31-16} encodes the rw/len bits: 649 // b_x+3, b_x+2, b_x+1, b_x 650 // where bits{x+1, x} => rw 651 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused) 652 // and bits{x+3, x+2} => len 653 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte 654 // 655 // dr0 -> bits{19-16} 656 // dr1 -> bits{23-20} 657 // dr2 -> bits{27-24} 658 // dr3 -> bits{31-28} 659 debug_state.__dr7 |= (1 << (2*hw_index) | 660 size_and_rw_bits(size, read, write) << (16+4*hw_index)); 661 switch (hw_index) { 662 case 0: 663 debug_state.__dr0 = addr; break; 664 case 1: 665 debug_state.__dr1 = addr; break; 666 case 2: 667 debug_state.__dr2 = addr; break; 668 case 3: 669 debug_state.__dr3 = addr; break; 670 default: 671 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 672 } 673 return; 674} 675 676void 677DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index) 678{ 679 debug_state.__dr7 &= ~(3 << (2*hw_index)); 680 switch (hw_index) { 681 case 0: 682 debug_state.__dr0 = 0; break; 683 case 1: 684 debug_state.__dr1 = 0; break; 685 case 2: 686 debug_state.__dr2 = 0; break; 687 case 3: 688 debug_state.__dr3 = 0; break; 689 default: 690 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 691 } 692 return; 693} 694 695bool 696DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index) 697{ 698 // Check dr7 (debug control register) for local/global enable bits: 699 // global enable --. .-- local enable 700 // | | 701 // v v 702 // dr0 -> bits{1-0} 703 // dr1 -> bits{3-2} 704 // dr2 -> bits{5-4} 705 // dr3 -> bits{7-6} 706 return (debug_state.__dr7 & (3 << (2*hw_index))) == 0; 707} 708 709// Resets local copy of debug status register to wait for the next debug excpetion. 
710void 711DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state) 712{ 713 // See also IsWatchpointHit(). 714 debug_state.__dr6 = 0; 715 return; 716} 717 718bool 719DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index) 720{ 721 // Check dr6 (debug status register) whether a watchpoint hits: 722 // is watchpoint hit? 723 // | 724 // v 725 // dr0 -> bits{0} 726 // dr1 -> bits{1} 727 // dr2 -> bits{2} 728 // dr3 -> bits{3} 729 return (debug_state.__dr6 & (1 << hw_index)); 730} 731 732nub_addr_t 733DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index) 734{ 735 switch (hw_index) { 736 case 0: 737 return debug_state.__dr0; 738 case 1: 739 return debug_state.__dr1; 740 case 2: 741 return debug_state.__dr2; 742 case 3: 743 return debug_state.__dr3; 744 default: 745 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 746 } 747} 748 749uint32_t 750DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write) 751{ 752 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = 0x%llx, size = %zu, read = %u, write = %u)", (uint64_t)addr, size, read, write); 753 754 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 755 756 // Can only watch 1, 2, 4, or 8 bytes. 
757 if (!(size == 1 || size == 2 || size == 4 || size == 8)) 758 return INVALID_NUB_HW_INDEX; 759 760 // We must watch for either read or write 761 if (read == false && write == false) 762 return INVALID_NUB_HW_INDEX; 763 764 // Read the debug state 765 kern_return_t kret = GetDBGState(false); 766 767 if (kret == KERN_SUCCESS) 768 { 769 // Check to make sure we have the needed hardware support 770 uint32_t i = 0; 771 772 DBG &debug_state = m_state.context.dbg; 773 for (i = 0; i < num_hw_watchpoints; ++i) 774 { 775 if (IsWatchpointVacant(debug_state, i)) 776 break; 777 } 778 779 // See if we found an available hw breakpoint slot above 780 if (i < num_hw_watchpoints) 781 { 782 // Modify our local copy of the debug state, first. 783 SetWatchpoint(debug_state, i, addr, size, read, write); 784 // Now set the watch point in the inferior. 785 kret = SetDBGState(); 786 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret); 787 788 if (kret == KERN_SUCCESS) 789 return i; 790 } 791 else 792 { 793 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints); 794 } 795 } 796 return INVALID_NUB_HW_INDEX; 797} 798 799bool 800DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index) 801{ 802 kern_return_t kret = GetDBGState(false); 803 804 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 805 if (kret == KERN_SUCCESS) 806 { 807 DBG &debug_state = m_state.context.dbg; 808 if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index)) 809 { 810 // Modify our local copy of the debug state, first. 811 ClearWatchpoint(debug_state, hw_index); 812 // Now disable the watch point in the inferior. 
813 kret = SetDBGState(); 814 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )", 815 hw_index); 816 817 if (kret == KERN_SUCCESS) 818 return true; 819 } 820 } 821 return false; 822} 823 824DNBArchImplX86_64::DBG DNBArchImplX86_64::Global_Debug_State = {0,0,0,0,0,0,0,0}; 825bool DNBArchImplX86_64::Valid_Global_Debug_State = false; 826 827// Use this callback from MachThread, which in turn was called from MachThreadList, to update 828// the global view of the hardware watchpoint state, so that when new thread comes along, they 829// get to inherit the existing hardware watchpoint state. 830void 831DNBArchImplX86_64::HardwareWatchpointStateChanged () 832{ 833 Global_Debug_State = m_state.context.dbg; 834 Valid_Global_Debug_State = true; 835} 836 837// Iterate through the debug status register; return the index of the first hit. 838uint32_t 839DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr) 840{ 841 // Read the debug state 842 kern_return_t kret = GetDBGState(true); 843 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret); 844 if (kret == KERN_SUCCESS) 845 { 846 DBG &debug_state = m_state.context.dbg; 847 uint32_t i, num = NumSupportedHardwareWatchpoints(); 848 for (i = 0; i < num; ++i) 849 { 850 if (IsWatchpointHit(debug_state, i)) 851 { 852 addr = GetWatchAddress(debug_state, i); 853 DNBLogThreadedIf(LOG_WATCHPOINTS, 854 "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).", 855 i, 856 (uint64_t)addr); 857 return i; 858 } 859 } 860 } 861 return INVALID_NUB_HW_INDEX; 862} 863 864// Set the single step bit in the processor status register. 
865kern_return_t 866DNBArchImplX86_64::EnableHardwareSingleStep (bool enable) 867{ 868 if (GetGPRState(false) == KERN_SUCCESS) 869 { 870 const uint32_t trace_bit = 0x100u; 871 if (enable) 872 m_state.context.gpr.__rflags |= trace_bit; 873 else 874 m_state.context.gpr.__rflags &= ~trace_bit; 875 return SetGPRState(); 876 } 877 return m_state.GetError(e_regSetGPR, Read); 878} 879 880 881//---------------------------------------------------------------------- 882// Register information defintions 883//---------------------------------------------------------------------- 884 885enum 886{ 887 gpr_rax = 0, 888 gpr_rbx, 889 gpr_rcx, 890 gpr_rdx, 891 gpr_rdi, 892 gpr_rsi, 893 gpr_rbp, 894 gpr_rsp, 895 gpr_r8, 896 gpr_r9, 897 gpr_r10, 898 gpr_r11, 899 gpr_r12, 900 gpr_r13, 901 gpr_r14, 902 gpr_r15, 903 gpr_rip, 904 gpr_rflags, 905 gpr_cs, 906 gpr_fs, 907 gpr_gs, 908 k_num_gpr_regs 909}; 910 911enum { 912 fpu_fcw, 913 fpu_fsw, 914 fpu_ftw, 915 fpu_fop, 916 fpu_ip, 917 fpu_cs, 918 fpu_dp, 919 fpu_ds, 920 fpu_mxcsr, 921 fpu_mxcsrmask, 922 fpu_stmm0, 923 fpu_stmm1, 924 fpu_stmm2, 925 fpu_stmm3, 926 fpu_stmm4, 927 fpu_stmm5, 928 fpu_stmm6, 929 fpu_stmm7, 930 fpu_xmm0, 931 fpu_xmm1, 932 fpu_xmm2, 933 fpu_xmm3, 934 fpu_xmm4, 935 fpu_xmm5, 936 fpu_xmm6, 937 fpu_xmm7, 938 fpu_xmm8, 939 fpu_xmm9, 940 fpu_xmm10, 941 fpu_xmm11, 942 fpu_xmm12, 943 fpu_xmm13, 944 fpu_xmm14, 945 fpu_xmm15, 946 fpu_ymm0, 947 fpu_ymm1, 948 fpu_ymm2, 949 fpu_ymm3, 950 fpu_ymm4, 951 fpu_ymm5, 952 fpu_ymm6, 953 fpu_ymm7, 954 fpu_ymm8, 955 fpu_ymm9, 956 fpu_ymm10, 957 fpu_ymm11, 958 fpu_ymm12, 959 fpu_ymm13, 960 fpu_ymm14, 961 fpu_ymm15, 962 k_num_fpu_regs, 963 964 // Aliases 965 fpu_fctrl = fpu_fcw, 966 fpu_fstat = fpu_fsw, 967 fpu_ftag = fpu_ftw, 968 fpu_fiseg = fpu_cs, 969 fpu_fioff = fpu_ip, 970 fpu_foseg = fpu_ds, 971 fpu_fooff = fpu_dp 972}; 973 974enum { 975 exc_trapno, 976 exc_err, 977 exc_faultvaddr, 978 k_num_exc_regs, 979}; 980 981 982enum gcc_dwarf_regnums 983{ 984 gcc_dwarf_rax = 0, 985 
    gcc_dwarf_rdx = 1,
    gcc_dwarf_rcx = 2,
    gcc_dwarf_rbx = 3,
    gcc_dwarf_rsi = 4,
    gcc_dwarf_rdi = 5,
    gcc_dwarf_rbp = 6,
    gcc_dwarf_rsp = 7,
    gcc_dwarf_r8,
    gcc_dwarf_r9,
    gcc_dwarf_r10,
    gcc_dwarf_r11,
    gcc_dwarf_r12,
    gcc_dwarf_r13,
    gcc_dwarf_r14,
    gcc_dwarf_r15,
    gcc_dwarf_rip,
    gcc_dwarf_xmm0,
    gcc_dwarf_xmm1,
    gcc_dwarf_xmm2,
    gcc_dwarf_xmm3,
    gcc_dwarf_xmm4,
    gcc_dwarf_xmm5,
    gcc_dwarf_xmm6,
    gcc_dwarf_xmm7,
    gcc_dwarf_xmm8,
    gcc_dwarf_xmm9,
    gcc_dwarf_xmm10,
    gcc_dwarf_xmm11,
    gcc_dwarf_xmm12,
    gcc_dwarf_xmm13,
    gcc_dwarf_xmm14,
    gcc_dwarf_xmm15,
    gcc_dwarf_stmm0,
    gcc_dwarf_stmm1,
    gcc_dwarf_stmm2,
    gcc_dwarf_stmm3,
    gcc_dwarf_stmm4,
    gcc_dwarf_stmm5,
    gcc_dwarf_stmm6,
    gcc_dwarf_stmm7,
    // DWARF reuses the xmm numbers for the ymm registers (the ymm register
    // is the full-width view of the same architectural register).
    gcc_dwarf_ymm0  = gcc_dwarf_xmm0,
    gcc_dwarf_ymm1  = gcc_dwarf_xmm1,
    gcc_dwarf_ymm2  = gcc_dwarf_xmm2,
    gcc_dwarf_ymm3  = gcc_dwarf_xmm3,
    gcc_dwarf_ymm4  = gcc_dwarf_xmm4,
    gcc_dwarf_ymm5  = gcc_dwarf_xmm5,
    gcc_dwarf_ymm6  = gcc_dwarf_xmm6,
    gcc_dwarf_ymm7  = gcc_dwarf_xmm7,
    gcc_dwarf_ymm8  = gcc_dwarf_xmm8,
    gcc_dwarf_ymm9  = gcc_dwarf_xmm9,
    gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
    gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
    gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
    gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
    gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
    gcc_dwarf_ymm15 = gcc_dwarf_xmm15
};

// Register numbers as used by the gdb remote protocol for x86-64.
enum gdb_regnums
{
    gdb_rax     = 0,
    gdb_rbx     = 1,
    gdb_rcx     = 2,
    gdb_rdx     = 3,
    gdb_rsi     = 4,
    gdb_rdi     = 5,
    gdb_rbp     = 6,
    gdb_rsp     = 7,
    gdb_r8      = 8,
    gdb_r9      = 9,
    gdb_r10     = 10,
    gdb_r11     = 11,
    gdb_r12     = 12,
    gdb_r13     = 13,
    gdb_r14     = 14,
    gdb_r15     = 15,
    gdb_rip     = 16,
    gdb_rflags  = 17,
    gdb_cs      = 18,
    gdb_ss      = 19,
    gdb_ds      = 20,
    gdb_es      = 21,
    gdb_fs      = 22,
    gdb_gs      = 23,
    gdb_stmm0   = 24,
    gdb_stmm1   = 25,
    gdb_stmm2   = 26,
    gdb_stmm3   = 27,
    gdb_stmm4   = 28,
    gdb_stmm5   = 29,
    gdb_stmm6   = 30,
    gdb_stmm7   = 31,
    gdb_fctrl   = 32,   gdb_fcw = gdb_fctrl,
    gdb_fstat   = 33,   gdb_fsw = gdb_fstat,
    gdb_ftag    = 34,   gdb_ftw = gdb_ftag,
    gdb_fiseg   = 35,   gdb_fpu_cs  = gdb_fiseg,
    gdb_fioff   = 36,   gdb_ip = gdb_fioff,
    gdb_foseg   = 37,   gdb_fpu_ds  = gdb_foseg,
    gdb_fooff   = 38,   gdb_dp = gdb_fooff,
    gdb_fop     = 39,
    gdb_xmm0    = 40,
    gdb_xmm1    = 41,
    gdb_xmm2    = 42,
    gdb_xmm3    = 43,
    gdb_xmm4    = 44,
    gdb_xmm5    = 45,
    gdb_xmm6    = 46,
    gdb_xmm7    = 47,
    gdb_xmm8    = 48,
    gdb_xmm9    = 49,
    gdb_xmm10   = 50,
    gdb_xmm11   = 51,
    gdb_xmm12   = 52,
    gdb_xmm13   = 53,
    gdb_xmm14   = 54,
    gdb_xmm15   = 55,
    gdb_mxcsr   = 56,
    gdb_ymm0    = gdb_xmm0,
    gdb_ymm1    = gdb_xmm1,
    gdb_ymm2    = gdb_xmm2,
    gdb_ymm3    = gdb_xmm3,
    gdb_ymm4    = gdb_xmm4,
    gdb_ymm5    = gdb_xmm5,
    gdb_ymm6    = gdb_xmm6,
    gdb_ymm7    = gdb_xmm7,
    gdb_ymm8    = gdb_xmm8,
    gdb_ymm9    = gdb_xmm9,
    gdb_ymm10   = gdb_xmm10,
    gdb_ymm11   = gdb_xmm11,
    gdb_ymm12   = gdb_xmm12,
    gdb_ymm13   = gdb_xmm13,
    gdb_ymm14   = gdb_xmm14,
    gdb_ymm15   = gdb_xmm15
};

// Byte offsets of each register within DNBArchImplX86_64::Context; the FPU
// offsets differ depending on whether the no_avx or avx union member is live.
#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg) + offsetof (DNBArchImplX86_64::Context, exc))

// This does not accurately identify the location of ymm0...7 in
// Context.fpu.avx.  That is because there is a bunch of padding
// in Context.fpu.avx that we don't need.  Offset macros lay out
// the register state that Debugserver transmits to the debugger
// -- not to interpret the thread_get_state info.
#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))

// Byte sizes of each register kind, derived from the state structs.
#define GPR_SIZE(reg)       (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
#define FPU_SIZE_YMM(reg)   (32)
#define EXC_SIZE(reg)       (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg }
#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg }
#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg }
#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg }

// General purpose registers for 64 bit
const DNBRegisterInfo
DNBArchImplX86_64::g_gpr_registers[] =
{
    DEFINE_GPR      (rax),
    DEFINE_GPR      (rbx),
    DEFINE_GPR_ALT  (rcx , "arg4", GENERIC_REGNUM_ARG4),
    DEFINE_GPR_ALT  (rdx , "arg3", GENERIC_REGNUM_ARG3),
    DEFINE_GPR_ALT  (rdi , "arg1", GENERIC_REGNUM_ARG1),
    DEFINE_GPR_ALT  (rsi , "arg2", GENERIC_REGNUM_ARG2),
    DEFINE_GPR_ALT  (rbp , "fp"  , GENERIC_REGNUM_FP),
    DEFINE_GPR_ALT  (rsp , "sp"  , GENERIC_REGNUM_SP),
    DEFINE_GPR_ALT  (r8  , "arg5", GENERIC_REGNUM_ARG5),
    DEFINE_GPR_ALT  (r9  , "arg6", GENERIC_REGNUM_ARG6),
    DEFINE_GPR      (r10),
    DEFINE_GPR      (r11),
    DEFINE_GPR      (r12),
    DEFINE_GPR      (r13),
    DEFINE_GPR      (r14),
    DEFINE_GPR      (r15),
    DEFINE_GPR_ALT  (rip , "pc", GENERIC_REGNUM_PC),
    DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
    DEFINE_GPR_ALT2 (cs, NULL),
    DEFINE_GPR_ALT2 (fs, NULL),
    DEFINE_GPR_ALT2 (gs, NULL),
};

// Floating point registers 64 bit
const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_no_avx[] =
{
    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1, -1, -1, -1 },

    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },

    { e_regSetFPU, fpu_xmm0 , "xmm0"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)  , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
    { e_regSetFPU, fpu_xmm1 , "xmm1"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)  , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
    { e_regSetFPU, fpu_xmm2 , "xmm2"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)  , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
    { e_regSetFPU, fpu_xmm3 , "xmm3"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)  , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
    { e_regSetFPU, fpu_xmm4 , "xmm4"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)  , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
    { e_regSetFPU, fpu_xmm5 , "xmm5"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)  , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
    { e_regSetFPU, fpu_xmm6 , "xmm6"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)  , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
    { e_regSetFPU, fpu_xmm7 , "xmm7"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)  , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
    { e_regSetFPU, fpu_xmm8 , "xmm8"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)  , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8 },
    { e_regSetFPU, fpu_xmm9 , "xmm9"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)  , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9 },
    { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
    { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
    { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
    { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
    { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
    { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
};

// FPU register info when the AVX state is available: same registers as the
// no_avx table but with AVX_OFFSET offsets, plus the 16 ymm registers.
const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_avx[] =
{
    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1, -1, -1, -1 },

    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },

    { e_regSetFPU, fpu_xmm0 , "xmm0"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)  , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
    { e_regSetFPU, fpu_xmm1 , "xmm1"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)  , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
    { e_regSetFPU, fpu_xmm2 , "xmm2"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)  , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
    { e_regSetFPU, fpu_xmm3 , "xmm3"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)  , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
    { e_regSetFPU, fpu_xmm4 , "xmm4"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)  , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
    { e_regSetFPU, fpu_xmm5 , "xmm5"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)  , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
    { e_regSetFPU, fpu_xmm6 , "xmm6"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)  , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
    { e_regSetFPU, fpu_xmm7 , "xmm7"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)  , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
    { e_regSetFPU, fpu_xmm8 , "xmm8"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)  , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8 },
    { e_regSetFPU, fpu_xmm9 , "xmm9"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)  , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9 },
    { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
    { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
    { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
    { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
    { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
    { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },

    { e_regSetFPU, fpu_ymm0 , "ymm0"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)  , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1, gdb_ymm0 },
    { e_regSetFPU, fpu_ymm1 , "ymm1"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)  , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1, gdb_ymm1 },
    { e_regSetFPU, fpu_ymm2 , "ymm2"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)  , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1, gdb_ymm2 },
    { e_regSetFPU, fpu_ymm3 , "ymm3"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)  , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1, gdb_ymm3 },
    { e_regSetFPU, fpu_ymm4 , "ymm4"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)  , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1, gdb_ymm4 },
    { e_regSetFPU, fpu_ymm5 , "ymm5"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)  , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1, gdb_ymm5 },
    { e_regSetFPU, fpu_ymm6 , "ymm6"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)  , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1, gdb_ymm6 },
    { e_regSetFPU, fpu_ymm7 , "ymm7"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)  , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1, gdb_ymm7 },
    { e_regSetFPU, fpu_ymm8 , "ymm8"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)  , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1, gdb_ymm8 },
    { e_regSetFPU, fpu_ymm9 , "ymm9"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)  , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1, gdb_ymm9 },
    { e_regSetFPU, fpu_ymm10, "ymm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10) , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1, gdb_ymm10 },
    { e_regSetFPU, fpu_ymm11, "ymm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11) , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1, gdb_ymm11 },
    { e_regSetFPU, fpu_ymm12, "ymm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12) , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1, gdb_ymm12 },
    { e_regSetFPU, fpu_ymm13, "ymm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13) , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1, gdb_ymm13 },
    { e_regSetFPU, fpu_ymm14, "ymm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14) , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1, gdb_ymm14 },
    { e_regSetFPU, fpu_ymm15, "ymm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15) , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1, gdb_ymm15 }
};

// Exception registers

const DNBRegisterInfo
DNBArchImplX86_64::g_exc_registers[] =
{
    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)    , -1, -1, -1, -1 },
    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)       , -1, -1, -1, -1 },
    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr), -1, -1, -1, -1 }
};

// Number of registers in each register set
const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;

//----------------------------------------------------------------------
// Register set definitions. The first definitions at register set index
// of zero is for all registers, followed by other registers sets. The
// register information for the all register set need not be filled in.
//----------------------------------------------------------------------
const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_no_avx[] =
{
    { "x86_64 Registers",           NULL,               k_num_all_registers_no_avx },
    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
};

const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_avx[] =
{
    { "x86_64 Registers",           NULL,               k_num_all_registers_avx },
    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
    { "Floating Point Registers",   g_fpu_registers_avx, k_num_fpu_registers_avx },
    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
};

// Total number of register sets for this architecture
// (both tables have the same number of sets, so either works here).
const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);


// Factory callback registered with DNBArchProtocol (see Initialize()).
// Creates the per-thread arch implementation for 'thread'.
DNBArchProtocol *
DNBArchImplX86_64::Create (MachThread *thread)
{
    DNBArchImplX86_64 *obj = new DNBArchImplX86_64 (thread);

    // When new thread comes along, it tries to inherit from the global debug state, if it is valid.
    if (Valid_Global_Debug_State)
    {
        obj->m_state.context.dbg = Global_Debug_State;
        kern_return_t kret = obj->SetDBGState();
        DNBLogThreadedIf(LOG_WATCHPOINTS,
                         "DNBArchImplX86_64::Create() Inherit and SetDBGState() => 0x%8.8x.", kret);
    }
    return obj;
}

// Returns the software breakpoint opcode for the given size, or NULL if the
// size is unsupported.  x86-64 uses the 1-byte INT3 instruction (0xCC).
const uint8_t * const
DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
{
    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
    if (byte_size == 1)
        return g_breakpoint_opcode;
    return NULL;
}

// Returns the register-set table appropriate for this CPU (AVX or not) and
// stores the number of sets in *num_reg_sets.
const DNBRegisterSetInfo *
DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
{
    *num_reg_sets = k_num_register_sets;

    if (CPUHasAVX() || FORCE_AVX_REGS)
        return g_reg_sets_avx;
    else
        return g_reg_sets_no_avx;
}

void
DNBArchImplX86_64::Initialize()
{
    DNBArchPluginInfo arch_plugin_info =
    {
        CPU_TYPE_X86_64,
        DNBArchImplX86_64::Create,
        DNBArchImplX86_64::GetRegisterSetInfo,
        DNBArchImplX86_64::SoftwareBreakpointOpcode
    };

    // Register this arch plug-in with the main protocol class
    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
}

// Reads the value of register (set, reg) into *value, first mapping any
// REGISTER_SET_GENERIC pseudo-register (pc/sp/fp/flags) to the concrete GPR.
// Returns false on an unknown register or if the state cannot be fetched.
bool
DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
{
    if (set == REGISTER_SET_GENERIC)
    {
        switch (reg)
        {
        case GENERIC_REGNUM_PC:     // Program Counter
            set = e_regSetGPR;
            reg = gpr_rip;
            break;

        case GENERIC_REGNUM_SP:     // Stack Pointer
            set = e_regSetGPR;
            reg = gpr_rsp;
            break;

        case GENERIC_REGNUM_FP:     // Frame Pointer
            set = e_regSetGPR;
            reg = gpr_rbp;
            break;

        case GENERIC_REGNUM_FLAGS:  // Processor flags register
            set = e_regSetGPR;
            reg = gpr_rflags;
            break;

        case GENERIC_REGNUM_RA:     // Return Address
        default:
            return false;
        }
    }

    if (GetRegisterState(set, false) != KERN_SUCCESS)
        return false;

    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
    if (regInfo)
    {
        value->info = *regInfo;
        switch (set)
        {
        case e_regSetGPR:
            if (reg < k_num_gpr_registers)
            {
                // The GPR struct is laid out as consecutive uint64_t values
                // in gpr_* enum order, so it can be indexed as an array.
                value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
                return true;
            }
            break;

        case e_regSetFPU:
            if (CPUHasAVX() || FORCE_AVX_REGS)
            {
                switch (reg)
                {
                case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
                case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
                case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
                case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
                case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
                case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
                case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
                case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
                case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
                case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    // x87/MMX registers: 10 significant bytes each.
                    memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
                    return true;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
                case fpu_xmm9:
                case fpu_xmm10:
                case fpu_xmm11:
                case fpu_xmm12:
                case fpu_xmm13:
                case fpu_xmm14:
                case fpu_xmm15:
                    memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
                    return true;

                case fpu_ymm0:
                case fpu_ymm1:
                case fpu_ymm2:
                case fpu_ymm3:
                case fpu_ymm4:
                case fpu_ymm5:
                case fpu_ymm6:
                case fpu_ymm7:
                case fpu_ymm8:
                case fpu_ymm9:
                case fpu_ymm10:
                case fpu_ymm11:
                case fpu_ymm12:
                case fpu_ymm13:
                case fpu_ymm14:
                case fpu_ymm15:
                    // A ymm register is assembled from its xmm low half and
                    // the ymmh high half stored separately in the AVX state.
                    memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
                    memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
                    return true;
                }
            }
            else
            {
                switch (reg)
                {
                case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
                case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
                case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
                case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
                case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
                case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
                case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
                case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
                case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
                case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
                    return true;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
                case fpu_xmm9:
                case fpu_xmm10:
                case fpu_xmm11:
                case fpu_xmm12:
                case fpu_xmm13:
                case fpu_xmm14:
                case fpu_xmm15:
                    memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
                    return true;
                }
            }
            break;

        case e_regSetEXC:
            switch (reg)
            {
            case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno;     return true;
            case exc_err:       value->value.uint32 = m_state.context.exc.__err;        return true;
            case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
            }
            break;
        }
    }
    return false;
}


// Writes *value into register (set, reg), with the same generic-register
// mapping as GetRegisterValue.  Successful writes set 'success' and fall
// through so the modified state can be committed after the switch.
bool
DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
{
    if (set == REGISTER_SET_GENERIC)
    {
        switch (reg)
        {
        case GENERIC_REGNUM_PC:     // Program Counter
            set = e_regSetGPR;
            reg = gpr_rip;
            break;

        case GENERIC_REGNUM_SP:     // Stack Pointer
            set = e_regSetGPR;
            reg = gpr_rsp;
            break;

        case GENERIC_REGNUM_FP:     // Frame Pointer
            set = e_regSetGPR;
            reg = gpr_rbp;
            break;

        case GENERIC_REGNUM_FLAGS:  // Processor flags register
            set = e_regSetGPR;
            reg = gpr_rflags;
            break;

        case GENERIC_REGNUM_RA:     // Return Address
        default:
            return false;
        }
    }

    if (GetRegisterState(set, false) != KERN_SUCCESS)
        return false;

    bool success = false;
    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
    if (regInfo)
    {
        switch (set)
        {
        case e_regSetGPR:
            if (reg < k_num_gpr_registers)
            {
                ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
                success = true;
            }
            break;

        case e_regSetFPU:
            if (CPUHasAVX() || FORCE_AVX_REGS)
            {
                switch (reg)
                {
                case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
                case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
                case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
                case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
                case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
                case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
                case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
                case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
                case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
                case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
                    success = true;
                    break;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
                case fpu_xmm9:
                case fpu_xmm10:
                case fpu_xmm11:
                case fpu_xmm12:
                case fpu_xmm13:
                case fpu_xmm14:
                case fpu_xmm15:
                    memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
                    success = true;
                    break;

                case fpu_ymm0:
                case fpu_ymm1:
                case fpu_ymm2:
                case fpu_ymm3:
                case fpu_ymm4:
                case fpu_ymm5:
                case fpu_ymm6:
                case fpu_ymm7:
                case fpu_ymm8:
                case fpu_ymm9:
                case fpu_ymm10:
                case fpu_ymm11:
                case fpu_ymm12:
                case fpu_ymm13:
                case fpu_ymm14:
                case fpu_ymm15:
                    // Low 16 bytes go to the xmm slot, high 16 to the ymmh slot.
                    memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
                    memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
                    // NOTE(review): unlike every sibling case, this returns
                    // immediately instead of 'success = true; break;', which
                    // bypasses whatever commit path follows the switch —
                    // verify the modified ymm state is actually written back
                    // to the thread.
                    return true;
                }
            }
            else
            {
                switch (reg)
                {
                case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
                case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
                case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
                case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
                case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
                case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
                case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
                case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
                case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
                case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
                    success = true;
                    break;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
1705 case fpu_xmm9: 1706 case fpu_xmm10: 1707 case fpu_xmm11: 1708 case fpu_xmm12: 1709 case fpu_xmm13: 1710 case fpu_xmm14: 1711 case fpu_xmm15: 1712 memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16); 1713 success = true; 1714 break; 1715 } 1716 } 1717 break; 1718 1719 case e_regSetEXC: 1720 switch (reg) 1721 { 1722 case exc_trapno: m_state.context.exc.__trapno = value->value.uint32; success = true; break; 1723 case exc_err: m_state.context.exc.__err = value->value.uint32; success = true; break; 1724 case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break; 1725 } 1726 break; 1727 } 1728 } 1729 1730 if (success) 1731 return SetRegisterState(set) == KERN_SUCCESS; 1732 return false; 1733} 1734 1735 1736nub_size_t 1737DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len) 1738{ 1739 nub_size_t size = sizeof (m_state.context); 1740 1741 if (buf && buf_len) 1742 { 1743 if (size > buf_len) 1744 size = buf_len; 1745 1746 bool force = false; 1747 kern_return_t kret; 1748 if ((kret = GetGPRState(force)) != KERN_SUCCESS) 1749 { 1750 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to read: %u ", buf, buf_len, kret); 1751 size = 0; 1752 } 1753 else 1754 if ((kret = GetFPUState(force)) != KERN_SUCCESS) 1755 { 1756 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: %s regs failed to read: %u", buf, buf_len, CPUHasAVX() ? 
"AVX" : "FPU", kret); 1757 size = 0; 1758 } 1759 else 1760 if ((kret = GetEXCState(force)) != KERN_SUCCESS) 1761 { 1762 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to read: %u", buf, buf_len, kret); 1763 size = 0; 1764 } 1765 else 1766 { 1767 // Success 1768 ::memcpy (buf, &m_state.context, size); 1769 } 1770 } 1771 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1772 // Return the size of the register context even if NULL was passed in 1773 return size; 1774} 1775 1776nub_size_t 1777DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len) 1778{ 1779 nub_size_t size = sizeof (m_state.context); 1780 if (buf == NULL || buf_len == 0) 1781 size = 0; 1782 1783 if (size) 1784 { 1785 if (size > buf_len) 1786 size = buf_len; 1787 1788 ::memcpy (&m_state.context, buf, size); 1789 kern_return_t kret; 1790 if ((kret = SetGPRState()) != KERN_SUCCESS) 1791 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to write: %u", buf, buf_len, kret); 1792 if ((kret = SetFPUState()) != KERN_SUCCESS) 1793 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: %s regs failed to write: %u", buf, buf_len, CPUHasAVX() ? 
"AVX" : "FPU", kret); 1794 if ((kret = SetEXCState()) != KERN_SUCCESS) 1795 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: EXP regs failed to write: %u", buf, buf_len, kret); 1796 } 1797 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1798 return size; 1799} 1800 1801 1802kern_return_t 1803DNBArchImplX86_64::GetRegisterState(int set, bool force) 1804{ 1805 switch (set) 1806 { 1807 case e_regSetALL: return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 1808 case e_regSetGPR: return GetGPRState(force); 1809 case e_regSetFPU: return GetFPUState(force); 1810 case e_regSetEXC: return GetEXCState(force); 1811 default: break; 1812 } 1813 return KERN_INVALID_ARGUMENT; 1814} 1815 1816kern_return_t 1817DNBArchImplX86_64::SetRegisterState(int set) 1818{ 1819 // Make sure we have a valid context to set. 1820 if (RegisterSetStateIsValid(set)) 1821 { 1822 switch (set) 1823 { 1824 case e_regSetALL: return SetGPRState() | SetFPUState() | SetEXCState(); 1825 case e_regSetGPR: return SetGPRState(); 1826 case e_regSetFPU: return SetFPUState(); 1827 case e_regSetEXC: return SetEXCState(); 1828 default: break; 1829 } 1830 } 1831 return KERN_INVALID_ARGUMENT; 1832} 1833 1834bool 1835DNBArchImplX86_64::RegisterSetStateIsValid (int set) const 1836{ 1837 return m_state.RegsAreValid(set); 1838} 1839 1840 1841 1842#endif // #if defined (__i386__) || defined (__x86_64__) 1843