DNBArchImplX86_64.cpp revision 5e47385b4f036f79b2acf6ea58b6fd6a40763443
1//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// Created by Greg Clayton on 6/25/07. 11// 12//===----------------------------------------------------------------------===// 13 14#if defined (__i386__) || defined (__x86_64__) 15 16#include <sys/cdefs.h> 17 18#include "MacOSX/x86_64/DNBArchImplX86_64.h" 19#include "DNBLog.h" 20#include "MachThread.h" 21#include "MachProcess.h" 22#include <mach/mach.h> 23#include <stdlib.h> 24 25#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG) 26enum debugState { 27 debugStateUnknown, 28 debugStateOff, 29 debugStateOn 30}; 31 32static debugState sFPUDebugState = debugStateUnknown; 33static debugState sAVXForceState = debugStateUnknown; 34 35static bool DebugFPURegs () 36{ 37 if (sFPUDebugState == debugStateUnknown) 38 { 39 if (getenv("DNB_DEBUG_FPU_REGS")) 40 sFPUDebugState = debugStateOn; 41 else 42 sFPUDebugState = debugStateOff; 43 } 44 45 return (sFPUDebugState == debugStateOn); 46} 47 48static bool ForceAVXRegs () 49{ 50 if (sFPUDebugState == debugStateUnknown) 51 { 52 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS")) 53 sAVXForceState = debugStateOn; 54 else 55 sAVXForceState = debugStateOff; 56 } 57 58 return (sAVXForceState == debugStateOn); 59} 60 61#define DEBUG_FPU_REGS (DebugFPURegs()) 62#define FORCE_AVX_REGS (ForceAVXRegs()) 63#else 64#define DEBUG_FPU_REGS (0) 65#define FORCE_AVX_REGS (0) 66#endif 67 68enum DNBArchImplX86_64::AVXPresence DNBArchImplX86_64::s_has_avx = DNBArchImplX86_64::kAVXUnknown; 69 70uint64_t 71DNBArchImplX86_64::GetPC(uint64_t failValue) 72{ 73 // Get program counter 74 if (GetGPRState(false) == KERN_SUCCESS) 75 return m_state.context.gpr.__rip; 76 return failValue; 77} 78 79kern_return_t 
80DNBArchImplX86_64::SetPC(uint64_t value) 81{ 82 // Get program counter 83 kern_return_t err = GetGPRState(false); 84 if (err == KERN_SUCCESS) 85 { 86 m_state.context.gpr.__rip = value; 87 err = SetGPRState(); 88 } 89 return err == KERN_SUCCESS; 90} 91 92uint64_t 93DNBArchImplX86_64::GetSP(uint64_t failValue) 94{ 95 // Get stack pointer 96 if (GetGPRState(false) == KERN_SUCCESS) 97 return m_state.context.gpr.__rsp; 98 return failValue; 99} 100 101// Uncomment the value below to verify the values in the debugger. 102//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 103 104kern_return_t 105DNBArchImplX86_64::GetGPRState(bool force) 106{ 107 if (force || m_state.GetError(e_regSetGPR, Read)) 108 { 109 kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID()); 110 DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount()); 111 112#if DEBUG_GPR_VALUES 113 m_state.context.gpr.__rax = ('a' << 8) + 'x'; 114 m_state.context.gpr.__rbx = ('b' << 8) + 'x'; 115 m_state.context.gpr.__rcx = ('c' << 8) + 'x'; 116 m_state.context.gpr.__rdx = ('d' << 8) + 'x'; 117 m_state.context.gpr.__rdi = ('d' << 8) + 'i'; 118 m_state.context.gpr.__rsi = ('s' << 8) + 'i'; 119 m_state.context.gpr.__rbp = ('b' << 8) + 'p'; 120 m_state.context.gpr.__rsp = ('s' << 8) + 'p'; 121 m_state.context.gpr.__r8 = ('r' << 8) + '8'; 122 m_state.context.gpr.__r9 = ('r' << 8) + '9'; 123 m_state.context.gpr.__r10 = ('r' << 8) + 'a'; 124 m_state.context.gpr.__r11 = ('r' << 8) + 'b'; 125 m_state.context.gpr.__r12 = ('r' << 8) + 'c'; 126 m_state.context.gpr.__r13 = ('r' << 8) + 'd'; 127 m_state.context.gpr.__r14 = ('r' << 8) + 'e'; 128 m_state.context.gpr.__r15 = ('r' << 8) + 'f'; 129 m_state.context.gpr.__rip = ('i' << 8) + 'p'; 130 m_state.context.gpr.__rflags = ('f' << 8) + 'l'; 131 m_state.context.gpr.__cs = ('c' << 8) + 's'; 132 m_state.context.gpr.__fs = 
('f' << 8) + 's'; 133 m_state.context.gpr.__gs = ('g' << 8) + 's'; 134 m_state.SetError(e_regSetGPR, Read, 0); 135#else 136 mach_msg_type_number_t count = e_regSetWordSizeGPR; 137 m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count)); 138 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 139 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx" 140 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx" 141 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx" 142 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx" 143 "\n\trip = %16.16llx" 144 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx", 145 m_thread->ThreadID(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT, 146 m_state.GetError(e_regSetGPR, Read), 147 m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx, 148 m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi, 149 m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8, 150 m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11, 151 m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14, 152 m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags, 153 m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs); 154 155 // DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 156 // "\n\trax = %16.16llx" 157 // "\n\trbx = %16.16llx" 158 // "\n\trcx = %16.16llx" 159 // "\n\trdx = %16.16llx" 160 // "\n\trdi = %16.16llx" 161 // "\n\trsi = %16.16llx" 162 // "\n\trbp = %16.16llx" 163 // "\n\trsp = %16.16llx" 164 // "\n\t r8 = %16.16llx" 165 // "\n\t r9 = %16.16llx" 166 // "\n\tr10 = %16.16llx" 167 // "\n\tr11 = %16.16llx" 168 // "\n\tr12 = %16.16llx" 169 // "\n\tr13 = 
%16.16llx" 170 // "\n\tr14 = %16.16llx" 171 // "\n\tr15 = %16.16llx" 172 // "\n\trip = %16.16llx" 173 // "\n\tflg = %16.16llx" 174 // "\n\t cs = %16.16llx" 175 // "\n\t fs = %16.16llx" 176 // "\n\t gs = %16.16llx", 177 // m_thread->ThreadID(), 178 // x86_THREAD_STATE64, 179 // x86_THREAD_STATE64_COUNT, 180 // m_state.GetError(e_regSetGPR, Read), 181 // m_state.context.gpr.__rax, 182 // m_state.context.gpr.__rbx, 183 // m_state.context.gpr.__rcx, 184 // m_state.context.gpr.__rdx, 185 // m_state.context.gpr.__rdi, 186 // m_state.context.gpr.__rsi, 187 // m_state.context.gpr.__rbp, 188 // m_state.context.gpr.__rsp, 189 // m_state.context.gpr.__r8, 190 // m_state.context.gpr.__r9, 191 // m_state.context.gpr.__r10, 192 // m_state.context.gpr.__r11, 193 // m_state.context.gpr.__r12, 194 // m_state.context.gpr.__r13, 195 // m_state.context.gpr.__r14, 196 // m_state.context.gpr.__r15, 197 // m_state.context.gpr.__rip, 198 // m_state.context.gpr.__rflags, 199 // m_state.context.gpr.__cs, 200 // m_state.context.gpr.__fs, 201 // m_state.context.gpr.__gs); 202#endif 203 } 204 return m_state.GetError(e_regSetGPR, Read); 205} 206 207// Uncomment the value below to verify the values in the debugger. 
208//#define DEBUG_FPU_REGS 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 209 210kern_return_t 211DNBArchImplX86_64::GetFPUState(bool force) 212{ 213 if (force || m_state.GetError(e_regSetFPU, Read)) 214 { 215 if (DEBUG_FPU_REGS) { 216 if (HasAVX() || FORCE_AVX_REGS) 217 { 218 m_state.context.fpu.avx.__fpu_reserved[0] = -1; 219 m_state.context.fpu.avx.__fpu_reserved[1] = -1; 220 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234; 221 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678; 222 m_state.context.fpu.avx.__fpu_ftw = 1; 223 m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX; 224 m_state.context.fpu.avx.__fpu_fop = 2; 225 m_state.context.fpu.avx.__fpu_ip = 3; 226 m_state.context.fpu.avx.__fpu_cs = 4; 227 m_state.context.fpu.avx.__fpu_rsrv2 = 5; 228 m_state.context.fpu.avx.__fpu_dp = 6; 229 m_state.context.fpu.avx.__fpu_ds = 7; 230 m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX; 231 m_state.context.fpu.avx.__fpu_mxcsr = 8; 232 m_state.context.fpu.avx.__fpu_mxcsrmask = 9; 233 int i; 234 for (i=0; i<16; ++i) 235 { 236 if (i<10) 237 { 238 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 239 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 240 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 241 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 242 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 243 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 244 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 245 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 246 } 247 else 248 { 249 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 250 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 251 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 252 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 253 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 254 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 255 
m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 256 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 257 } 258 259 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0'; 260 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1'; 261 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2'; 262 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3'; 263 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4'; 264 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5'; 265 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6'; 266 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7'; 267 m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8'; 268 m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9'; 269 m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A'; 270 m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B'; 271 m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C'; 272 m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D'; 273 m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E'; 274 m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F'; 275 276 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0'; 277 m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1'; 278 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2'; 279 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3'; 280 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4'; 281 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5'; 282 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6'; 283 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7'; 284 m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8'; 285 m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9'; 286 m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A'; 287 m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B'; 288 m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C'; 289 m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D'; 290 m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E'; 
291 m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F'; 292 } 293 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 294 m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN; 295 m_state.context.fpu.avx.__fpu_reserved1 = -1; 296 for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i) 297 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 298 m_state.SetError(e_regSetFPU, Read, 0); 299 } 300 else 301 { 302 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 303 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 304 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 305 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 306 m_state.context.fpu.no_avx.__fpu_ftw = 1; 307 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 308 m_state.context.fpu.no_avx.__fpu_fop = 2; 309 m_state.context.fpu.no_avx.__fpu_ip = 3; 310 m_state.context.fpu.no_avx.__fpu_cs = 4; 311 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 312 m_state.context.fpu.no_avx.__fpu_dp = 6; 313 m_state.context.fpu.no_avx.__fpu_ds = 7; 314 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 315 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 316 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 317 int i; 318 for (i=0; i<16; ++i) 319 { 320 if (i<10) 321 { 322 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 323 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 324 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 325 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 326 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 327 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 328 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 329 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 330 } 331 else 332 { 333 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 334 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 335 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] 
= INT8_MIN; 336 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 337 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 338 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 339 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 340 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 341 } 342 343 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 344 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 345 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 346 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 347 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 348 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 349 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 350 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 351 m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8'; 352 m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9'; 353 m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A'; 354 m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B'; 355 m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C'; 356 m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D'; 357 m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E'; 358 m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F'; 359 } 360 for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i) 361 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 362 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 363 m_state.SetError(e_regSetFPU, Read, 0); 364 } 365 } 366 else 367 { 368 if (HasAVX() || FORCE_AVX_REGS) 369 { 370 mach_msg_type_number_t count = e_regSetWordSizeAVX; 371 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count)); 372 } 373 else 374 { 375 mach_msg_type_number_t count = e_regSetWordSizeFPR; 376 m_state.SetError(e_regSetFPU, Read, 
::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count)); 377 } 378 } 379 } 380 return m_state.GetError(e_regSetFPU, Read); 381} 382 383kern_return_t 384DNBArchImplX86_64::GetEXCState(bool force) 385{ 386 if (force || m_state.GetError(e_regSetEXC, Read)) 387 { 388 mach_msg_type_number_t count = e_regSetWordSizeEXC; 389 m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count)); 390 } 391 return m_state.GetError(e_regSetEXC, Read); 392} 393 394kern_return_t 395DNBArchImplX86_64::SetGPRState() 396{ 397 kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID()); 398 DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount()); 399 400 m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR)); 401 DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 402 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx" 403 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx" 404 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx" 405 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx" 406 "\n\trip = %16.16llx" 407 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx", 408 m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR, 409 m_state.GetError(e_regSetGPR, Write), 410 m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx, 411 m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi, 412 m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8, 413 m_state.context.gpr.__r9, 
m_state.context.gpr.__r10,m_state.context.gpr.__r11, 414 m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14, 415 m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags, 416 m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs); 417 return m_state.GetError(e_regSetGPR, Write); 418} 419 420kern_return_t 421DNBArchImplX86_64::SetFPUState() 422{ 423 if (DEBUG_FPU_REGS) 424 { 425 m_state.SetError(e_regSetFPU, Write, 0); 426 return m_state.GetError(e_regSetFPU, Write); 427 } 428 else 429 { 430 if (HasAVX() || FORCE_AVX_REGS) 431 { 432 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX)); 433 return m_state.GetError(e_regSetFPU, Write); 434 } 435 else 436 { 437 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPR)); 438 return m_state.GetError(e_regSetFPU, Write); 439 } 440 } 441} 442 443kern_return_t 444DNBArchImplX86_64::SetEXCState() 445{ 446 m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC)); 447 return m_state.GetError(e_regSetEXC, Write); 448} 449 450void 451DNBArchImplX86_64::ThreadWillResume() 452{ 453 // Do we need to step this thread? If so, let the mach thread tell us so. 454 if (m_thread->IsStepping()) 455 { 456 // This is the primary thread, let the arch do anything it needs 457 EnableHardwareSingleStep(true); 458 } 459} 460 461bool 462DNBArchImplX86_64::ThreadDidStop() 463{ 464 bool success = true; 465 466 m_state.InvalidateAllRegisterStates(); 467 468 // Are we stepping a single instruction? 469 if (GetGPRState(true) == KERN_SUCCESS) 470 { 471 // We are single stepping, was this the primary thread? 
472 if (m_thread->IsStepping()) 473 { 474 // This was the primary thread, we need to clear the trace 475 // bit if so. 476 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 477 } 478 else 479 { 480 // The MachThread will automatically restore the suspend count 481 // in ThreadDidStop(), so we don't need to do anything here if 482 // we weren't the primary thread the last time 483 } 484 } 485 return success; 486} 487 488bool 489DNBArchImplX86_64::NotifyException(MachException::Data& exc) 490{ 491 switch (exc.exc_type) 492 { 493 case EXC_BAD_ACCESS: 494 break; 495 case EXC_BAD_INSTRUCTION: 496 break; 497 case EXC_ARITHMETIC: 498 break; 499 case EXC_EMULATION: 500 break; 501 case EXC_SOFTWARE: 502 break; 503 case EXC_BREAKPOINT: 504 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) 505 { 506 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS); 507 if (pc != INVALID_NUB_ADDRESS && pc > 0) 508 { 509 pc -= 1; 510 // Check for a breakpoint at one byte prior to the current PC value 511 // since the PC will be just past the trap. 512 513 nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc); 514 if (NUB_BREAK_ID_IS_VALID(breakID)) 515 { 516 // Backup the PC for i386 since the trap was taken and the PC 517 // is at the address following the single byte trap instruction. 518 if (m_state.context.gpr.__rip > 0) 519 { 520 m_state.context.gpr.__rip = pc; 521 // Write the new PC back out 522 SetGPRState (); 523 } 524 } 525 return true; 526 } 527 } 528 break; 529 case EXC_SYSCALL: 530 break; 531 case EXC_MACH_SYSCALL: 532 break; 533 case EXC_RPC_ALERT: 534 break; 535 } 536 return false; 537} 538 539 540// Set the single step bit in the processor status register. 
541kern_return_t 542DNBArchImplX86_64::EnableHardwareSingleStep (bool enable) 543{ 544 if (GetGPRState(false) == KERN_SUCCESS) 545 { 546 const uint32_t trace_bit = 0x100u; 547 if (enable) 548 m_state.context.gpr.__rflags |= trace_bit; 549 else 550 m_state.context.gpr.__rflags &= ~trace_bit; 551 return SetGPRState(); 552 } 553 return m_state.GetError(e_regSetGPR, Read); 554} 555 556 557//---------------------------------------------------------------------- 558// Register information defintions 559//---------------------------------------------------------------------- 560 561enum 562{ 563 gpr_rax = 0, 564 gpr_rbx, 565 gpr_rcx, 566 gpr_rdx, 567 gpr_rdi, 568 gpr_rsi, 569 gpr_rbp, 570 gpr_rsp, 571 gpr_r8, 572 gpr_r9, 573 gpr_r10, 574 gpr_r11, 575 gpr_r12, 576 gpr_r13, 577 gpr_r14, 578 gpr_r15, 579 gpr_rip, 580 gpr_rflags, 581 gpr_cs, 582 gpr_fs, 583 gpr_gs, 584 k_num_gpr_regs 585}; 586 587enum { 588 fpu_fcw, 589 fpu_fsw, 590 fpu_ftw, 591 fpu_fop, 592 fpu_ip, 593 fpu_cs, 594 fpu_dp, 595 fpu_ds, 596 fpu_mxcsr, 597 fpu_mxcsrmask, 598 fpu_stmm0, 599 fpu_stmm1, 600 fpu_stmm2, 601 fpu_stmm3, 602 fpu_stmm4, 603 fpu_stmm5, 604 fpu_stmm6, 605 fpu_stmm7, 606 fpu_xmm0, 607 fpu_xmm1, 608 fpu_xmm2, 609 fpu_xmm3, 610 fpu_xmm4, 611 fpu_xmm5, 612 fpu_xmm6, 613 fpu_xmm7, 614 fpu_xmm8, 615 fpu_xmm9, 616 fpu_xmm10, 617 fpu_xmm11, 618 fpu_xmm12, 619 fpu_xmm13, 620 fpu_xmm14, 621 fpu_xmm15, 622 fpu_ymm0, 623 fpu_ymm1, 624 fpu_ymm2, 625 fpu_ymm3, 626 fpu_ymm4, 627 fpu_ymm5, 628 fpu_ymm6, 629 fpu_ymm7, 630 fpu_ymm8, 631 fpu_ymm9, 632 fpu_ymm10, 633 fpu_ymm11, 634 fpu_ymm12, 635 fpu_ymm13, 636 fpu_ymm14, 637 fpu_ymm15, 638 k_num_fpu_regs, 639 640 // Aliases 641 fpu_fctrl = fpu_fcw, 642 fpu_fstat = fpu_fsw, 643 fpu_ftag = fpu_ftw, 644 fpu_fiseg = fpu_cs, 645 fpu_fioff = fpu_ip, 646 fpu_foseg = fpu_ds, 647 fpu_fooff = fpu_dp 648}; 649 650enum { 651 exc_trapno, 652 exc_err, 653 exc_faultvaddr, 654 k_num_exc_regs, 655}; 656 657 658enum gcc_dwarf_regnums 659{ 660 gcc_dwarf_rax = 0, 661 
gcc_dwarf_rdx, 662 gcc_dwarf_rcx, 663 gcc_dwarf_rbx, 664 gcc_dwarf_rsi, 665 gcc_dwarf_rdi, 666 gcc_dwarf_rbp, 667 gcc_dwarf_rsp, 668 gcc_dwarf_r8, 669 gcc_dwarf_r9, 670 gcc_dwarf_r10, 671 gcc_dwarf_r11, 672 gcc_dwarf_r12, 673 gcc_dwarf_r13, 674 gcc_dwarf_r14, 675 gcc_dwarf_r15, 676 gcc_dwarf_rip, 677 gcc_dwarf_xmm0, 678 gcc_dwarf_xmm1, 679 gcc_dwarf_xmm2, 680 gcc_dwarf_xmm3, 681 gcc_dwarf_xmm4, 682 gcc_dwarf_xmm5, 683 gcc_dwarf_xmm6, 684 gcc_dwarf_xmm7, 685 gcc_dwarf_xmm8, 686 gcc_dwarf_xmm9, 687 gcc_dwarf_xmm10, 688 gcc_dwarf_xmm11, 689 gcc_dwarf_xmm12, 690 gcc_dwarf_xmm13, 691 gcc_dwarf_xmm14, 692 gcc_dwarf_xmm15, 693 gcc_dwarf_stmm0, 694 gcc_dwarf_stmm1, 695 gcc_dwarf_stmm2, 696 gcc_dwarf_stmm3, 697 gcc_dwarf_stmm4, 698 gcc_dwarf_stmm5, 699 gcc_dwarf_stmm6, 700 gcc_dwarf_stmm7, 701 gcc_dwarf_ymm0 = gcc_dwarf_xmm0, 702 gcc_dwarf_ymm1 = gcc_dwarf_xmm1, 703 gcc_dwarf_ymm2 = gcc_dwarf_xmm2, 704 gcc_dwarf_ymm3 = gcc_dwarf_xmm3, 705 gcc_dwarf_ymm4 = gcc_dwarf_xmm4, 706 gcc_dwarf_ymm5 = gcc_dwarf_xmm5, 707 gcc_dwarf_ymm6 = gcc_dwarf_xmm6, 708 gcc_dwarf_ymm7 = gcc_dwarf_xmm7, 709 gcc_dwarf_ymm8 = gcc_dwarf_xmm8, 710 gcc_dwarf_ymm9 = gcc_dwarf_xmm9, 711 gcc_dwarf_ymm10 = gcc_dwarf_xmm10, 712 gcc_dwarf_ymm11 = gcc_dwarf_xmm11, 713 gcc_dwarf_ymm12 = gcc_dwarf_xmm12, 714 gcc_dwarf_ymm13 = gcc_dwarf_xmm13, 715 gcc_dwarf_ymm14 = gcc_dwarf_xmm14, 716 gcc_dwarf_ymm15 = gcc_dwarf_xmm15 717}; 718 719enum gdb_regnums 720{ 721 gdb_rax = 0, 722 gdb_rbx = 1, 723 gdb_rcx = 2, 724 gdb_rdx = 3, 725 gdb_rsi = 4, 726 gdb_rdi = 5, 727 gdb_rbp = 6, 728 gdb_rsp = 7, 729 gdb_r8 = 8, 730 gdb_r9 = 9, 731 gdb_r10 = 10, 732 gdb_r11 = 11, 733 gdb_r12 = 12, 734 gdb_r13 = 13, 735 gdb_r14 = 14, 736 gdb_r15 = 15, 737 gdb_rip = 16, 738 gdb_rflags = 17, 739 gdb_cs = 18, 740 gdb_ss = 19, 741 gdb_ds = 20, 742 gdb_es = 21, 743 gdb_fs = 22, 744 gdb_gs = 23, 745 gdb_stmm0 = 24, 746 gdb_stmm1 = 25, 747 gdb_stmm2 = 26, 748 gdb_stmm3 = 27, 749 gdb_stmm4 = 28, 750 gdb_stmm5 = 29, 751 gdb_stmm6 = 30, 752 
gdb_stmm7 = 31, 753 gdb_fctrl = 32, gdb_fcw = gdb_fctrl, 754 gdb_fstat = 33, gdb_fsw = gdb_fstat, 755 gdb_ftag = 34, gdb_ftw = gdb_ftag, 756 gdb_fiseg = 35, gdb_fpu_cs = gdb_fiseg, 757 gdb_fioff = 36, gdb_ip = gdb_fioff, 758 gdb_foseg = 37, gdb_fpu_ds = gdb_foseg, 759 gdb_fooff = 38, gdb_dp = gdb_fooff, 760 gdb_fop = 39, 761 gdb_xmm0 = 40, 762 gdb_xmm1 = 41, 763 gdb_xmm2 = 42, 764 gdb_xmm3 = 43, 765 gdb_xmm4 = 44, 766 gdb_xmm5 = 45, 767 gdb_xmm6 = 46, 768 gdb_xmm7 = 47, 769 gdb_xmm8 = 48, 770 gdb_xmm9 = 49, 771 gdb_xmm10 = 50, 772 gdb_xmm11 = 51, 773 gdb_xmm12 = 52, 774 gdb_xmm13 = 53, 775 gdb_xmm14 = 54, 776 gdb_xmm15 = 55, 777 gdb_mxcsr = 56, 778 gdb_ymm0 = gdb_xmm0, 779 gdb_ymm1 = gdb_xmm1, 780 gdb_ymm2 = gdb_xmm2, 781 gdb_ymm3 = gdb_xmm3, 782 gdb_ymm4 = gdb_xmm4, 783 gdb_ymm5 = gdb_xmm5, 784 gdb_ymm6 = gdb_xmm6, 785 gdb_ymm7 = gdb_xmm7, 786 gdb_ymm8 = gdb_xmm8, 787 gdb_ymm9 = gdb_xmm9, 788 gdb_ymm10 = gdb_xmm10, 789 gdb_ymm11 = gdb_xmm11, 790 gdb_ymm12 = gdb_xmm12, 791 gdb_ymm13 = gdb_xmm13, 792 gdb_ymm14 = gdb_xmm14, 793 gdb_ymm15 = gdb_xmm15 794}; 795 796#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg)) 797#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx)) 798#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx)) 799#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg) + offsetof (DNBArchImplX86_64::Context, exc)) 800 801// This does not accurately identify the location of ymm0...7 in 802// Context.fpu.avx. That is because there is a bunch of padding 803// in Context.fpu.avx that we don't need. Offset macros lay out 804// the register state that Debugserver transmits to the debugger 805// -- not to interpret the thread_get_state info. 
806#define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n)) 807 808#define GPR_SIZE(reg) (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg)) 809#define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg)) 810#define FPU_SIZE_MMST(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg)) 811#define FPU_SIZE_XMM(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg)) 812#define FPU_SIZE_YMM(reg) (32) 813#define EXC_SIZE(reg) (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg)) 814 815// These macros will auto define the register name, alt name, register size, 816// register offset, encoding, format and native register. This ensures that 817// the register state structures are defined correctly and have the correct 818// sizes and offsets. 819#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg } 820#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg } 821#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg } 822 823// General purpose registers for 64 bit 824const DNBRegisterInfo 825DNBArchImplX86_64::g_gpr_registers[] = 826{ 827 DEFINE_GPR (rax), 828 DEFINE_GPR (rbx), 829 DEFINE_GPR (rcx), 830 DEFINE_GPR (rdx), 831 DEFINE_GPR (rdi), 832 DEFINE_GPR (rsi), 833 DEFINE_GPR_ALT (rbp, "fp", GENERIC_REGNUM_FP), 834 DEFINE_GPR_ALT (rsp, "sp", GENERIC_REGNUM_SP), 835 DEFINE_GPR (r8), 836 DEFINE_GPR (r9), 837 DEFINE_GPR (r10), 838 DEFINE_GPR (r11), 839 DEFINE_GPR (r12), 840 DEFINE_GPR (r13), 841 DEFINE_GPR (r14), 842 DEFINE_GPR (r15), 843 DEFINE_GPR_ALT (rip, "pc", GENERIC_REGNUM_PC), 844 DEFINE_GPR_ALT2 (rflags, "flags"), 845 DEFINE_GPR_ALT2 (cs, 
NULL),
    DEFINE_GPR_ALT2 (fs, NULL),
    DEFINE_GPR_ALT2 (gs, NULL),
};

// Floating point registers 64 bit
//
// Table columns: register set, register number, primary name, alt name,
// value type, display format, byte size, byte offset within the FPU state,
// then the gcc / dwarf / generic / gdb register numbers (-1 == none).
const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_no_avx[] =
{
    // FPU control/status words and SSE control registers, located via
    // FPU_OFFSET() into the non-AVX (x86_float_state64) layout.
    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1, -1, -1, -1 },

    // x87/MMX stack registers (10 bytes each).
    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },

    // SSE registers (16 bytes each).
    { e_regSetFPU, fpu_xmm0 , "xmm0"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)  , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
    { e_regSetFPU, fpu_xmm1 , "xmm1"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)  , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
    { e_regSetFPU, fpu_xmm2 , "xmm2"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)  , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
    { e_regSetFPU, fpu_xmm3 , "xmm3"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)  , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
    { e_regSetFPU, fpu_xmm4 , "xmm4"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)  , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
    { e_regSetFPU, fpu_xmm5 , "xmm5"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)  , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
    { e_regSetFPU, fpu_xmm6 , "xmm6"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)  , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
    { e_regSetFPU, fpu_xmm7 , "xmm7"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)  , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
    { e_regSetFPU, fpu_xmm8 , "xmm8"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)  , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8 },
    { e_regSetFPU, fpu_xmm9 , "xmm9"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)  , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9 },
    { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
    { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
    { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
    { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
    { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
    { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
};

// Same table for AVX-capable targets: identical registers located via
// AVX_OFFSET() into the AVX state layout, plus the 16 YMM registers
// (addressed via AVX_OFFSET_YMM() by register index).
const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_avx[] =
{
    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1, -1, -1, -1 },
    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1, -1, -1, -1 },

    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },

    { e_regSetFPU, fpu_xmm0 , "xmm0"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)  , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
    { e_regSetFPU, fpu_xmm1 , "xmm1"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)  , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
    { e_regSetFPU, fpu_xmm2 , "xmm2"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)  , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
    { e_regSetFPU, fpu_xmm3 , "xmm3"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)  , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
    { e_regSetFPU, fpu_xmm4 , "xmm4"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)  , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
    { e_regSetFPU, fpu_xmm5 , "xmm5"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)  , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
    { e_regSetFPU, fpu_xmm6 , "xmm6"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)  , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
    { e_regSetFPU, fpu_xmm7 , "xmm7"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)  , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
    { e_regSetFPU, fpu_xmm8 , "xmm8"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)  , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8 },
    { e_regSetFPU, fpu_xmm9 , "xmm9"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)  , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9 },
    { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
    { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
    { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
    { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
    { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
    { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },

    // Full 256-bit YMM registers.
    { e_regSetFPU, fpu_ymm0 , "ymm0"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)  , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1, gdb_ymm0 },
    { e_regSetFPU, fpu_ymm1 , "ymm1"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)  , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1, gdb_ymm1 },
    { e_regSetFPU, fpu_ymm2 , "ymm2"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)  , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1, gdb_ymm2 },
    { e_regSetFPU, fpu_ymm3 , "ymm3"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)  , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1, gdb_ymm3 },
    { e_regSetFPU, fpu_ymm4 , "ymm4"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)  , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1, gdb_ymm4 },
    { e_regSetFPU, fpu_ymm5 , "ymm5"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)  , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1, gdb_ymm5 },
    { e_regSetFPU, fpu_ymm6 , "ymm6"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)  , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1, gdb_ymm6 },
    { e_regSetFPU, fpu_ymm7 , "ymm7"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)  , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1, gdb_ymm7 },
    { e_regSetFPU, fpu_ymm8 , "ymm8"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)  , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1, gdb_ymm8 },
    { e_regSetFPU, fpu_ymm9 , "ymm9"  , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)  , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1, gdb_ymm9 },
    { e_regSetFPU, fpu_ymm10, "ymm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10) , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1, gdb_ymm10 },
    { e_regSetFPU, fpu_ymm11, "ymm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11) , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1, gdb_ymm11 },
    { e_regSetFPU, fpu_ymm12, "ymm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12) , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1, gdb_ymm12 },
    { e_regSetFPU, fpu_ymm13, "ymm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13) , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1, gdb_ymm13 },
    { e_regSetFPU, fpu_ymm14, "ymm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14) , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1, gdb_ymm14 },
    { e_regSetFPU, fpu_ymm15, "ymm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15) , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1, gdb_ymm15 }
};

// Exception registers

const DNBRegisterInfo
DNBArchImplX86_64::g_exc_registers[] =
{
    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)     , -1, -1, -1, -1 },
    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)        , -1, -1, -1, -1 },
    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr) , -1, -1, -1, -1 }
};

// Number of registers in each register set
const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
const size_t
DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers; 967 968//---------------------------------------------------------------------- 969// Register set definitions. The first definitions at register set index 970// of zero is for all registers, followed by other registers sets. The 971// register information for the all register set need not be filled in. 972//---------------------------------------------------------------------- 973const DNBRegisterSetInfo 974DNBArchImplX86_64::g_reg_sets_no_avx[] = 975{ 976 { "x86_64 Registers", NULL, k_num_all_registers_no_avx }, 977 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 978 { "Floating Point Registers", g_fpu_registers_no_avx, k_num_fpu_registers_no_avx }, 979 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 980}; 981 982const DNBRegisterSetInfo 983DNBArchImplX86_64::g_reg_sets_avx[] = 984{ 985 { "x86_64 Registers", NULL, k_num_all_registers_avx }, 986 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 987 { "Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx }, 988 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 989}; 990 991// Total number of register sets for this architecture 992const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo); 993 994 995DNBArchProtocol * 996DNBArchImplX86_64::Create (MachThread *thread) 997{ 998 return new DNBArchImplX86_64 (thread); 999} 1000 1001const uint8_t * const 1002DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size) 1003{ 1004 static const uint8_t g_breakpoint_opcode[] = { 0xCC }; 1005 if (byte_size == 1) 1006 return g_breakpoint_opcode; 1007 return NULL; 1008} 1009 1010const DNBRegisterSetInfo * 1011DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets) 1012{ 1013 *num_reg_sets = k_num_register_sets; 1014 1015 if (HasAVX() || FORCE_AVX_REGS) 
1016 return g_reg_sets_avx; 1017 else 1018 return g_reg_sets_no_avx; 1019} 1020 1021void 1022DNBArchImplX86_64::Initialize() 1023{ 1024 DNBArchPluginInfo arch_plugin_info = 1025 { 1026 CPU_TYPE_X86_64, 1027 DNBArchImplX86_64::Create, 1028 DNBArchImplX86_64::GetRegisterSetInfo, 1029 DNBArchImplX86_64::SoftwareBreakpointOpcode 1030 }; 1031 1032 // Register this arch plug-in with the main protocol class 1033 DNBArchProtocol::RegisterArchPlugin (arch_plugin_info); 1034} 1035 1036bool 1037DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value) 1038{ 1039 if (set == REGISTER_SET_GENERIC) 1040 { 1041 switch (reg) 1042 { 1043 case GENERIC_REGNUM_PC: // Program Counter 1044 set = e_regSetGPR; 1045 reg = gpr_rip; 1046 break; 1047 1048 case GENERIC_REGNUM_SP: // Stack Pointer 1049 set = e_regSetGPR; 1050 reg = gpr_rsp; 1051 break; 1052 1053 case GENERIC_REGNUM_FP: // Frame Pointer 1054 set = e_regSetGPR; 1055 reg = gpr_rbp; 1056 break; 1057 1058 case GENERIC_REGNUM_FLAGS: // Processor flags register 1059 set = e_regSetGPR; 1060 reg = gpr_rflags; 1061 break; 1062 1063 case GENERIC_REGNUM_RA: // Return Address 1064 default: 1065 return false; 1066 } 1067 } 1068 1069 if (GetRegisterState(set, false) != KERN_SUCCESS) 1070 return false; 1071 1072 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1073 if (regInfo) 1074 { 1075 value->info = *regInfo; 1076 switch (set) 1077 { 1078 case e_regSetGPR: 1079 if (reg < k_num_gpr_registers) 1080 { 1081 value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg]; 1082 return true; 1083 } 1084 break; 1085 1086 case e_regSetFPU: 1087 if (HasAVX() || FORCE_AVX_REGS) 1088 { 1089 switch (reg) 1090 { 1091 case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)); return true; 1092 case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)); return true; 1093 case fpu_ftw: value->value.uint8 = m_state.context.fpu.avx.__fpu_ftw; return true; 1094 
case fpu_fop: value->value.uint16 = m_state.context.fpu.avx.__fpu_fop; return true; 1095 case fpu_ip: value->value.uint32 = m_state.context.fpu.avx.__fpu_ip; return true; 1096 case fpu_cs: value->value.uint16 = m_state.context.fpu.avx.__fpu_cs; return true; 1097 case fpu_dp: value->value.uint32 = m_state.context.fpu.avx.__fpu_dp; return true; 1098 case fpu_ds: value->value.uint16 = m_state.context.fpu.avx.__fpu_ds; return true; 1099 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr; return true; 1100 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask; return true; 1101 1102 case fpu_stmm0: 1103 case fpu_stmm1: 1104 case fpu_stmm2: 1105 case fpu_stmm3: 1106 case fpu_stmm4: 1107 case fpu_stmm5: 1108 case fpu_stmm6: 1109 case fpu_stmm7: 1110 memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10); 1111 return true; 1112 1113 case fpu_xmm0: 1114 case fpu_xmm1: 1115 case fpu_xmm2: 1116 case fpu_xmm3: 1117 case fpu_xmm4: 1118 case fpu_xmm5: 1119 case fpu_xmm6: 1120 case fpu_xmm7: 1121 case fpu_xmm8: 1122 case fpu_xmm9: 1123 case fpu_xmm10: 1124 case fpu_xmm11: 1125 case fpu_xmm12: 1126 case fpu_xmm13: 1127 case fpu_xmm14: 1128 case fpu_xmm15: 1129 memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16); 1130 return true; 1131 1132 case fpu_ymm0: 1133 case fpu_ymm1: 1134 case fpu_ymm2: 1135 case fpu_ymm3: 1136 case fpu_ymm4: 1137 case fpu_ymm5: 1138 case fpu_ymm6: 1139 case fpu_ymm7: 1140 case fpu_ymm8: 1141 case fpu_ymm9: 1142 case fpu_ymm10: 1143 case fpu_ymm11: 1144 case fpu_ymm12: 1145 case fpu_ymm13: 1146 case fpu_ymm14: 1147 case fpu_ymm15: 1148 memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16); 1149 memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16); 1150 return true; 1151 } 1152 } 1153 else 1154 { 1155 switch (reg) 1156 { 1157 case fpu_fcw: value->value.uint16 = 
*((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); return true; 1158 case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); return true; 1159 case fpu_ftw: value->value.uint8 = m_state.context.fpu.no_avx.__fpu_ftw; return true; 1160 case fpu_fop: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; return true; 1161 case fpu_ip: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; return true; 1162 case fpu_cs: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; return true; 1163 case fpu_dp: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; return true; 1164 case fpu_ds: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; return true; 1165 case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; return true; 1166 case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; return true; 1167 1168 case fpu_stmm0: 1169 case fpu_stmm1: 1170 case fpu_stmm2: 1171 case fpu_stmm3: 1172 case fpu_stmm4: 1173 case fpu_stmm5: 1174 case fpu_stmm6: 1175 case fpu_stmm7: 1176 memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10); 1177 return true; 1178 1179 case fpu_xmm0: 1180 case fpu_xmm1: 1181 case fpu_xmm2: 1182 case fpu_xmm3: 1183 case fpu_xmm4: 1184 case fpu_xmm5: 1185 case fpu_xmm6: 1186 case fpu_xmm7: 1187 case fpu_xmm8: 1188 case fpu_xmm9: 1189 case fpu_xmm10: 1190 case fpu_xmm11: 1191 case fpu_xmm12: 1192 case fpu_xmm13: 1193 case fpu_xmm14: 1194 case fpu_xmm15: 1195 memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16); 1196 return true; 1197 } 1198 } 1199 break; 1200 1201 case e_regSetEXC: 1202 switch (reg) 1203 { 1204 case exc_trapno: value->value.uint32 = m_state.context.exc.__trapno; return true; 1205 case exc_err: value->value.uint32 = m_state.context.exc.__err; return true; 1206 case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true; 
1207 } 1208 break; 1209 } 1210 } 1211 return false; 1212} 1213 1214 1215bool 1216DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value) 1217{ 1218 if (set == REGISTER_SET_GENERIC) 1219 { 1220 switch (reg) 1221 { 1222 case GENERIC_REGNUM_PC: // Program Counter 1223 set = e_regSetGPR; 1224 reg = gpr_rip; 1225 break; 1226 1227 case GENERIC_REGNUM_SP: // Stack Pointer 1228 set = e_regSetGPR; 1229 reg = gpr_rsp; 1230 break; 1231 1232 case GENERIC_REGNUM_FP: // Frame Pointer 1233 set = e_regSetGPR; 1234 reg = gpr_rbp; 1235 break; 1236 1237 case GENERIC_REGNUM_FLAGS: // Processor flags register 1238 set = e_regSetGPR; 1239 reg = gpr_rflags; 1240 break; 1241 1242 case GENERIC_REGNUM_RA: // Return Address 1243 default: 1244 return false; 1245 } 1246 } 1247 1248 if (GetRegisterState(set, false) != KERN_SUCCESS) 1249 return false; 1250 1251 bool success = false; 1252 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1253 if (regInfo) 1254 { 1255 switch (set) 1256 { 1257 case e_regSetGPR: 1258 if (reg < k_num_gpr_registers) 1259 { 1260 ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64; 1261 success = true; 1262 } 1263 break; 1264 1265 case e_regSetFPU: 1266 if (HasAVX() || FORCE_AVX_REGS) 1267 { 1268 switch (reg) 1269 { 1270 case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1271 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1272 case fpu_ftw: m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; success = true; break; 1273 case fpu_fop: m_state.context.fpu.avx.__fpu_fop = value->value.uint16; success = true; break; 1274 case fpu_ip: m_state.context.fpu.avx.__fpu_ip = value->value.uint32; success = true; break; 1275 case fpu_cs: m_state.context.fpu.avx.__fpu_cs = value->value.uint16; success = true; break; 1276 case fpu_dp: m_state.context.fpu.avx.__fpu_dp = value->value.uint32; success = 
true; break; 1277 case fpu_ds: m_state.context.fpu.avx.__fpu_ds = value->value.uint16; success = true; break; 1278 case fpu_mxcsr: m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1279 case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; success = true; break; 1280 1281 case fpu_stmm0: 1282 case fpu_stmm1: 1283 case fpu_stmm2: 1284 case fpu_stmm3: 1285 case fpu_stmm4: 1286 case fpu_stmm5: 1287 case fpu_stmm6: 1288 case fpu_stmm7: 1289 memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10); 1290 success = true; 1291 break; 1292 1293 case fpu_xmm0: 1294 case fpu_xmm1: 1295 case fpu_xmm2: 1296 case fpu_xmm3: 1297 case fpu_xmm4: 1298 case fpu_xmm5: 1299 case fpu_xmm6: 1300 case fpu_xmm7: 1301 case fpu_xmm8: 1302 case fpu_xmm9: 1303 case fpu_xmm10: 1304 case fpu_xmm11: 1305 case fpu_xmm12: 1306 case fpu_xmm13: 1307 case fpu_xmm14: 1308 case fpu_xmm15: 1309 memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16); 1310 success = true; 1311 break; 1312 1313 case fpu_ymm0: 1314 case fpu_ymm1: 1315 case fpu_ymm2: 1316 case fpu_ymm3: 1317 case fpu_ymm4: 1318 case fpu_ymm5: 1319 case fpu_ymm6: 1320 case fpu_ymm7: 1321 case fpu_ymm8: 1322 case fpu_ymm9: 1323 case fpu_ymm10: 1324 case fpu_ymm11: 1325 case fpu_ymm12: 1326 case fpu_ymm13: 1327 case fpu_ymm14: 1328 case fpu_ymm15: 1329 memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16); 1330 memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16); 1331 return true; 1332 } 1333 } 1334 else 1335 { 1336 switch (reg) 1337 { 1338 case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16; success = true; break; 1339 case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16; success = true; break; 1340 case fpu_ftw: m_state.context.fpu.no_avx.__fpu_ftw = 
value->value.uint8; success = true; break; 1341 case fpu_fop: m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; success = true; break; 1342 case fpu_ip: m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; success = true; break; 1343 case fpu_cs: m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; success = true; break; 1344 case fpu_dp: m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; success = true; break; 1345 case fpu_ds: m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; success = true; break; 1346 case fpu_mxcsr: m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; success = true; break; 1347 case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; success = true; break; 1348 1349 case fpu_stmm0: 1350 case fpu_stmm1: 1351 case fpu_stmm2: 1352 case fpu_stmm3: 1353 case fpu_stmm4: 1354 case fpu_stmm5: 1355 case fpu_stmm6: 1356 case fpu_stmm7: 1357 memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10); 1358 success = true; 1359 break; 1360 1361 case fpu_xmm0: 1362 case fpu_xmm1: 1363 case fpu_xmm2: 1364 case fpu_xmm3: 1365 case fpu_xmm4: 1366 case fpu_xmm5: 1367 case fpu_xmm6: 1368 case fpu_xmm7: 1369 case fpu_xmm8: 1370 case fpu_xmm9: 1371 case fpu_xmm10: 1372 case fpu_xmm11: 1373 case fpu_xmm12: 1374 case fpu_xmm13: 1375 case fpu_xmm14: 1376 case fpu_xmm15: 1377 memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16); 1378 success = true; 1379 break; 1380 } 1381 } 1382 break; 1383 1384 case e_regSetEXC: 1385 switch (reg) 1386 { 1387 case exc_trapno: m_state.context.exc.__trapno = value->value.uint32; success = true; break; 1388 case exc_err: m_state.context.exc.__err = value->value.uint32; success = true; break; 1389 case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break; 1390 } 1391 break; 1392 } 1393 } 1394 1395 if (success) 1396 return SetRegisterState(set) == 
KERN_SUCCESS; 1397 return false; 1398} 1399 1400 1401nub_size_t 1402DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len) 1403{ 1404 nub_size_t size = sizeof (m_state.context); 1405 1406 if (buf && buf_len) 1407 { 1408 if (size > buf_len) 1409 size = buf_len; 1410 1411 bool force = false; 1412 if (GetGPRState(force) | GetFPUState(force) | GetEXCState(force)) 1413 return 0; 1414 ::memcpy (buf, &m_state.context, size); 1415 } 1416 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1417 // Return the size of the register context even if NULL was passed in 1418 return size; 1419} 1420 1421nub_size_t 1422DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len) 1423{ 1424 nub_size_t size = sizeof (m_state.context); 1425 if (buf == NULL || buf_len == 0) 1426 size = 0; 1427 1428 if (size) 1429 { 1430 if (size > buf_len) 1431 size = buf_len; 1432 1433 ::memcpy (&m_state.context, buf, size); 1434 SetGPRState(); 1435 SetFPUState(); 1436 SetEXCState(); 1437 } 1438 DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size); 1439 return size; 1440} 1441 1442 1443kern_return_t 1444DNBArchImplX86_64::GetRegisterState(int set, bool force) 1445{ 1446 switch (set) 1447 { 1448 case e_regSetALL: return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 1449 case e_regSetGPR: return GetGPRState(force); 1450 case e_regSetFPU: return GetFPUState(force); 1451 case e_regSetEXC: return GetEXCState(force); 1452 default: break; 1453 } 1454 return KERN_INVALID_ARGUMENT; 1455} 1456 1457kern_return_t 1458DNBArchImplX86_64::SetRegisterState(int set) 1459{ 1460 // Make sure we have a valid context to set. 
1461 if (RegisterSetStateIsValid(set)) 1462 { 1463 switch (set) 1464 { 1465 case e_regSetALL: return SetGPRState() | SetFPUState() | SetEXCState(); 1466 case e_regSetGPR: return SetGPRState(); 1467 case e_regSetFPU: return SetFPUState(); 1468 case e_regSetEXC: return SetEXCState(); 1469 default: break; 1470 } 1471 } 1472 return KERN_INVALID_ARGUMENT; 1473} 1474 1475bool 1476DNBArchImplX86_64::RegisterSetStateIsValid (int set) const 1477{ 1478 return m_state.RegsAreValid(set); 1479} 1480 1481 1482 1483#endif // #if defined (__i386__) || defined (__x86_64__) 1484