RegisterContext_x86_64.cpp revision dae196da3bf9c181405c9c0c1083069dc007e318
//===-- RegisterContext_x86_64.cpp -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include <cstring>
#include <errno.h>
#include <stdint.h>

#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/DataExtractor.h"
#include "lldb/Core/RegisterValue.h"
#include "lldb/Core/Scalar.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Host/Endian.h"

#include "ProcessPOSIX.h"
#include "ProcessMonitor.h"
#include "RegisterContext_i386.h"
#include "RegisterContext_x86.h"
#include "RegisterContext_x86_64.h"

using namespace lldb_private;
using namespace lldb;

// Support ptrace extensions even when compiled without required kernel support
#ifndef NT_X86_XSTATE
    #define NT_X86_XSTATE 0x202
#endif

// x86_64 register numbers as used by GCC and DWARF debug info
// (eRegisterKindGCC / eRegisterKindDWARF).  The first value is pinned to 0
// and the rest follow implicitly in declaration order.
enum
{
    gcc_dwarf_gpr_rax = 0,
    gcc_dwarf_gpr_rdx,
    gcc_dwarf_gpr_rcx,
    gcc_dwarf_gpr_rbx,
    gcc_dwarf_gpr_rsi,
    gcc_dwarf_gpr_rdi,
    gcc_dwarf_gpr_rbp,
    gcc_dwarf_gpr_rsp,
    gcc_dwarf_gpr_r8,
    gcc_dwarf_gpr_r9,
    gcc_dwarf_gpr_r10,
    gcc_dwarf_gpr_r11,
    gcc_dwarf_gpr_r12,
    gcc_dwarf_gpr_r13,
    gcc_dwarf_gpr_r14,
    gcc_dwarf_gpr_r15,
    gcc_dwarf_gpr_rip,
    gcc_dwarf_fpu_xmm0,
    gcc_dwarf_fpu_xmm1,
    gcc_dwarf_fpu_xmm2,
    gcc_dwarf_fpu_xmm3,
    gcc_dwarf_fpu_xmm4,
    gcc_dwarf_fpu_xmm5,
    gcc_dwarf_fpu_xmm6,
    gcc_dwarf_fpu_xmm7,
    gcc_dwarf_fpu_xmm8,
    gcc_dwarf_fpu_xmm9,
    gcc_dwarf_fpu_xmm10,
    gcc_dwarf_fpu_xmm11,
    gcc_dwarf_fpu_xmm12,
    gcc_dwarf_fpu_xmm13,
    gcc_dwarf_fpu_xmm14,
    gcc_dwarf_fpu_xmm15,
    gcc_dwarf_fpu_stmm0,
    gcc_dwarf_fpu_stmm1,
    gcc_dwarf_fpu_stmm2,
    gcc_dwarf_fpu_stmm3,
    gcc_dwarf_fpu_stmm4,
    gcc_dwarf_fpu_stmm5,
    gcc_dwarf_fpu_stmm6,
    gcc_dwarf_fpu_stmm7,
    gcc_dwarf_fpu_ymm0,
    gcc_dwarf_fpu_ymm1,
    gcc_dwarf_fpu_ymm2,
    gcc_dwarf_fpu_ymm3,
    gcc_dwarf_fpu_ymm4,
    gcc_dwarf_fpu_ymm5,
    gcc_dwarf_fpu_ymm6,
    gcc_dwarf_fpu_ymm7,
    gcc_dwarf_fpu_ymm8,
    gcc_dwarf_fpu_ymm9,
    gcc_dwarf_fpu_ymm10,
    gcc_dwarf_fpu_ymm11,
    gcc_dwarf_fpu_ymm12,
    gcc_dwarf_fpu_ymm13,
    gcc_dwarf_fpu_ymm14,
    gcc_dwarf_fpu_ymm15
};

// x86_64 register numbers as used by the gdb remote protocol
// (eRegisterKindGDB).  Values are spelled out explicitly because the wire
// protocol depends on them.
enum
{
    gdb_gpr_rax     =   0,
    gdb_gpr_rbx     =   1,
    gdb_gpr_rcx     =   2,
    gdb_gpr_rdx     =   3,
    gdb_gpr_rsi     =   4,
    gdb_gpr_rdi     =   5,
    gdb_gpr_rbp     =   6,
    gdb_gpr_rsp     =   7,
    gdb_gpr_r8      =   8,
    gdb_gpr_r9      =   9,
    gdb_gpr_r10     =  10,
    gdb_gpr_r11     =  11,
    gdb_gpr_r12     =  12,
    gdb_gpr_r13     =  13,
    gdb_gpr_r14     =  14,
    gdb_gpr_r15     =  15,
    gdb_gpr_rip     =  16,
    gdb_gpr_rflags  =  17,
    gdb_gpr_cs      =  18,
    gdb_gpr_ss      =  19,
    gdb_gpr_ds      =  20,
    gdb_gpr_es      =  21,
    gdb_gpr_fs      =  22,
    gdb_gpr_gs      =  23,
    gdb_fpu_stmm0   =  24,
    gdb_fpu_stmm1   =  25,
    gdb_fpu_stmm2   =  26,
    gdb_fpu_stmm3   =  27,
    gdb_fpu_stmm4   =  28,
    gdb_fpu_stmm5   =  29,
    gdb_fpu_stmm6   =  30,
    gdb_fpu_stmm7   =  31,
    gdb_fpu_fcw     =  32,
    gdb_fpu_fsw     =  33,
    gdb_fpu_ftw     =  34,
    gdb_fpu_cs_64   =  35,
    gdb_fpu_ip      =  36,
    gdb_fpu_ds_64   =  37,
    gdb_fpu_dp      =  38,
    gdb_fpu_fop     =  39,
    gdb_fpu_xmm0    =  40,
    gdb_fpu_xmm1    =  41,
    gdb_fpu_xmm2    =  42,
    gdb_fpu_xmm3    =  43,
    gdb_fpu_xmm4    =  44,
    gdb_fpu_xmm5    =  45,
    gdb_fpu_xmm6    =  46,
    gdb_fpu_xmm7    =  47,
    gdb_fpu_xmm8    =  48,
    gdb_fpu_xmm9    =  49,
    gdb_fpu_xmm10   =  50,
    gdb_fpu_xmm11   =  51,
    gdb_fpu_xmm12   =  52,
    gdb_fpu_xmm13   =  53,
    gdb_fpu_xmm14   =  54,
    gdb_fpu_xmm15   =  55,
    gdb_fpu_mxcsr   =  56,
    gdb_fpu_ymm0    =  57,
    gdb_fpu_ymm1    =  58,
    gdb_fpu_ymm2    =  59,
    gdb_fpu_ymm3    =  60,
    gdb_fpu_ymm4    =  61,
    gdb_fpu_ymm5    =  62,
    gdb_fpu_ymm6    =  63,
    gdb_fpu_ymm7    =  64,
    gdb_fpu_ymm8    =  65,
    gdb_fpu_ymm9    =  66,
    gdb_fpu_ymm10   =  67,
    gdb_fpu_ymm11   =  68,
    gdb_fpu_ymm12   =  69,
    gdb_fpu_ymm13   =  70,
    gdb_fpu_ymm14   =  71,
    gdb_fpu_ymm15   =  72
};

// LLDB register indices (gpr_* / fpu_* are declared in the headers above)
// that make up the "General Purpose Registers" set.  Includes the i386
// pseudo-registers (eax, ...) that alias the low halves of the 64-bit GPRs.
static const
uint32_t g_gpr_regnums[k_num_gpr_registers] =
{
    gpr_rax,
    gpr_rbx,
    gpr_rcx,
    gpr_rdx,
    gpr_rdi,
    gpr_rsi,
    gpr_rbp,
    gpr_rsp,
    gpr_r8,
    gpr_r9,
    gpr_r10,
    gpr_r11,
    gpr_r12,
    gpr_r13,
    gpr_r14,
    gpr_r15,
    gpr_rip,
    gpr_rflags,
    gpr_cs,
    gpr_fs,
    gpr_gs,
    gpr_ss,
    gpr_ds,
    gpr_es,
    gpr_eax,
    gpr_ebx,
    gpr_ecx,
    gpr_edx,
    gpr_edi,
    gpr_esi,
    gpr_ebp,
    gpr_esp,
    gpr_eip,
    gpr_eflags
};

// LLDB register indices making up the "Floating Point Registers" set
// (x87 control/status words, st/mm stack, and the SSE xmm registers).
static const uint32_t
g_fpu_regnums[k_num_fpr_registers] =
{
    fpu_fcw,
    fpu_fsw,
    fpu_ftw,
    fpu_fop,
    fpu_ip,
    fpu_cs,
    fpu_dp,
    fpu_ds,
    fpu_mxcsr,
    fpu_mxcsrmask,
    fpu_stmm0,
    fpu_stmm1,
    fpu_stmm2,
    fpu_stmm3,
    fpu_stmm4,
    fpu_stmm5,
    fpu_stmm6,
    fpu_stmm7,
    fpu_xmm0,
    fpu_xmm1,
    fpu_xmm2,
    fpu_xmm3,
    fpu_xmm4,
    fpu_xmm5,
    fpu_xmm6,
    fpu_xmm7,
    fpu_xmm8,
    fpu_xmm9,
    fpu_xmm10,
    fpu_xmm11,
    fpu_xmm12,
    fpu_xmm13,
    fpu_xmm14,
    fpu_xmm15
};

// LLDB register indices making up the "Advanced Vector Extensions" set
// (the 256-bit ymm registers, only available when XSAVE is supported).
static const uint32_t
g_avx_regnums[k_num_avx_registers] =
{
    fpu_ymm0,
    fpu_ymm1,
    fpu_ymm2,
    fpu_ymm3,
    fpu_ymm4,
    fpu_ymm5,
    fpu_ymm6,
    fpu_ymm7,
    fpu_ymm8,
    fpu_ymm9,
    fpu_ymm10,
    fpu_ymm11,
    fpu_ymm12,
    fpu_ymm13,
    fpu_ymm14,
    fpu_ymm15
};

// Number of register sets provided by this context.
enum
{
    k_num_extended_register_sets = 1,
    k_num_register_sets = 3
};

// The register sets exposed to LLDB.  Extended sets (AVX) must come last;
// IsRegisterSetAvailable() relies on that ordering.
static const RegisterSet
g_reg_sets[k_num_register_sets] =
{
    { "General Purpose Registers",  "gpr", k_num_gpr_registers, g_gpr_regnums },
    { "Floating Point Registers",   "fpu", k_num_fpr_registers, g_fpu_regnums },
    { "Advanced Vector Extensions", "avx", k_num_avx_registers, g_avx_regnums }
};

// Computes the offset of the given FPR in the user data area.
#define FPR_OFFSET(regname) \
    (offsetof(RegisterContext_x86_64::UserArea, i387) + \
     offsetof(RegisterContext_x86_64::FPR, xstate) + \
     offsetof(RegisterContext_x86_64::FXSAVE, regname))

// Computes the offset of the given YMM register in the user data area.
#define YMM_OFFSET(regname) \
    (offsetof(RegisterContext_x86_64::UserArea, i387) + \
     offsetof(RegisterContext_x86_64::FPR, ymm_set) + \
     offsetof(RegisterContext_x86_64::YMM, regname))

// Number of bytes needed to represent a i386 GPR
// (sizeof applied to a member through a null pointer is unevaluated,
// so this is a pure compile-time query).
#define GPR_i386_SIZE(reg) sizeof(((RegisterContext_i386::GPR*)NULL)->reg)

// Number of bytes needed to represent a FPR.
#define FPR_SIZE(reg) sizeof(((RegisterContext_x86_64::FXSAVE*)NULL)->reg)

// Number of bytes needed to represent the i'th FP register.
#define FP_SIZE sizeof(((RegisterContext_x86_64::MMSReg*)NULL)->bytes)

// Number of bytes needed to represent an XMM register.
#define XMM_SIZE sizeof(RegisterContext_x86_64::XMMReg)

// Number of bytes needed to represent a YMM register.
#define YMM_SIZE sizeof(RegisterContext_x86_64::YMMReg)

// Note that the size and offset will be updated by platform-specific classes.
// Build a RegisterInfo entry for a 64-bit GPR.  Size and byte offset are
// left 0 here; platform-specific subclasses fill them in (see note above).
#define DEFINE_GPR(reg, alt, kind1, kind2, kind3, kind4) \
    { #reg, alt, 0, 0, eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg }, NULL, NULL }

// Build a RegisterInfo entry for an i386 pseudo-register that aliases the
// low half of the corresponding 64-bit GPR.  Offset is fixed up later.
#define DEFINE_GPR_i386(reg_i386, reg_x86_64, alt, kind1, kind2, kind3, kind4) \
    { #reg_i386, alt, GPR_i386_SIZE(reg_i386), 0, eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg_i386 }, NULL, NULL }

// Build a RegisterInfo entry for a scalar FPU field within the FXSAVE area.
#define DEFINE_FPR(reg, kind1, kind2, kind3, kind4) \
    { #reg, NULL, FPR_SIZE(reg), FPR_OFFSET(reg), eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, fpu_##reg }, NULL, NULL }

// Build a RegisterInfo entry for the i'th x87 st/mm register.
#define DEFINE_FP(reg, i) \
    { #reg#i, NULL, FP_SIZE, FPR_OFFSET(reg[i]), eEncodingVector, \
      eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

// Build a RegisterInfo entry for the i'th xmm register.
#define DEFINE_XMM(reg, i) \
    { #reg#i, NULL, XMM_SIZE, FPR_OFFSET(reg[i]), eEncodingVector, \
      eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

// Build a RegisterInfo entry for the i'th ymm register (XSAVE/AVX only).
#define DEFINE_YMM(reg, i) \
    { #reg#i, NULL, YMM_SIZE, YMM_OFFSET(reg[i]), eEncodingVector, \
      eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

// Total size of the buffer used by {Read,Write}AllRegisterValues:
// all GPRs followed by the full FPR (x87/SSE/AVX) state.
#define REG_CONTEXT_SIZE (GetGPRSize() + sizeof(RegisterContext_x86_64::FPR))

// Master register description table.  Entry order must match the LLDB
// register number enums (gpr_* / fpu_*) declared in the headers above,
// since entries are indexed by those values.
static RegisterInfo
g_register_infos[k_num_registers] =
{
    // General purpose registers.
    DEFINE_GPR(rax,    NULL,    gcc_dwarf_gpr_rax,   gcc_dwarf_gpr_rax,   LLDB_INVALID_REGNUM,       gdb_gpr_rax),
    DEFINE_GPR(rbx,    NULL,    gcc_dwarf_gpr_rbx,   gcc_dwarf_gpr_rbx,   LLDB_INVALID_REGNUM,       gdb_gpr_rbx),
    DEFINE_GPR(rcx,    NULL,    gcc_dwarf_gpr_rcx,   gcc_dwarf_gpr_rcx,   LLDB_INVALID_REGNUM,       gdb_gpr_rcx),
    DEFINE_GPR(rdx,    NULL,    gcc_dwarf_gpr_rdx,   gcc_dwarf_gpr_rdx,   LLDB_INVALID_REGNUM,       gdb_gpr_rdx),
    DEFINE_GPR(rdi,    NULL,    gcc_dwarf_gpr_rdi,   gcc_dwarf_gpr_rdi,   LLDB_INVALID_REGNUM,       gdb_gpr_rdi),
    DEFINE_GPR(rsi,    NULL,    gcc_dwarf_gpr_rsi,   gcc_dwarf_gpr_rsi,   LLDB_INVALID_REGNUM,       gdb_gpr_rsi),
    DEFINE_GPR(rbp,    "fp",    gcc_dwarf_gpr_rbp,   gcc_dwarf_gpr_rbp,   LLDB_REGNUM_GENERIC_FP,    gdb_gpr_rbp),
    DEFINE_GPR(rsp,    "sp",    gcc_dwarf_gpr_rsp,   gcc_dwarf_gpr_rsp,   LLDB_REGNUM_GENERIC_SP,    gdb_gpr_rsp),
    DEFINE_GPR(r8,     NULL,    gcc_dwarf_gpr_r8,    gcc_dwarf_gpr_r8,    LLDB_INVALID_REGNUM,       gdb_gpr_r8),
    DEFINE_GPR(r9,     NULL,    gcc_dwarf_gpr_r9,    gcc_dwarf_gpr_r9,    LLDB_INVALID_REGNUM,       gdb_gpr_r9),
    DEFINE_GPR(r10,    NULL,    gcc_dwarf_gpr_r10,   gcc_dwarf_gpr_r10,   LLDB_INVALID_REGNUM,       gdb_gpr_r10),
    DEFINE_GPR(r11,    NULL,    gcc_dwarf_gpr_r11,   gcc_dwarf_gpr_r11,   LLDB_INVALID_REGNUM,       gdb_gpr_r11),
    DEFINE_GPR(r12,    NULL,    gcc_dwarf_gpr_r12,   gcc_dwarf_gpr_r12,   LLDB_INVALID_REGNUM,       gdb_gpr_r12),
    DEFINE_GPR(r13,    NULL,    gcc_dwarf_gpr_r13,   gcc_dwarf_gpr_r13,   LLDB_INVALID_REGNUM,       gdb_gpr_r13),
    DEFINE_GPR(r14,    NULL,    gcc_dwarf_gpr_r14,   gcc_dwarf_gpr_r14,   LLDB_INVALID_REGNUM,       gdb_gpr_r14),
    DEFINE_GPR(r15,    NULL,    gcc_dwarf_gpr_r15,   gcc_dwarf_gpr_r15,   LLDB_INVALID_REGNUM,       gdb_gpr_r15),
    DEFINE_GPR(rip,    "pc",    gcc_dwarf_gpr_rip,   gcc_dwarf_gpr_rip,   LLDB_REGNUM_GENERIC_PC,    gdb_gpr_rip),
    DEFINE_GPR(rflags, "flags", LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_REGNUM_GENERIC_FLAGS, gdb_gpr_rflags),
    DEFINE_GPR(cs,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_cs),
    DEFINE_GPR(fs,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_fs),
    DEFINE_GPR(gs,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_gs),
    DEFINE_GPR(ss,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_ss),
    DEFINE_GPR(ds,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_ds),
    DEFINE_GPR(es,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_es),
    // i386 registers
    DEFINE_GPR_i386(eax,    rax,    NULL,    gcc_eax,    dwarf_eax,    LLDB_INVALID_REGNUM,       gdb_eax),
    DEFINE_GPR_i386(ebx,    rbx,    NULL,    gcc_ebx,    dwarf_ebx,    LLDB_INVALID_REGNUM,       gdb_ebx),
    DEFINE_GPR_i386(ecx,    rcx,    NULL,    gcc_ecx,    dwarf_ecx,    LLDB_INVALID_REGNUM,       gdb_ecx),
    DEFINE_GPR_i386(edx,    rdx,    NULL,    gcc_edx,    dwarf_edx,    LLDB_INVALID_REGNUM,       gdb_edx),
    DEFINE_GPR_i386(edi,    rdi,    NULL,    gcc_edi,    dwarf_edi,    LLDB_INVALID_REGNUM,       gdb_edi),
    DEFINE_GPR_i386(esi,    rsi,    NULL,    gcc_esi,    dwarf_esi,    LLDB_INVALID_REGNUM,       gdb_esi),
    DEFINE_GPR_i386(ebp,    rbp,    "fp",    gcc_ebp,    dwarf_ebp,    LLDB_REGNUM_GENERIC_FP,    gdb_ebp),
    DEFINE_GPR_i386(esp,    rsp,    "sp",    gcc_esp,    dwarf_esp,    LLDB_REGNUM_GENERIC_SP,    gdb_esp),
    DEFINE_GPR_i386(eip,    rip,    "pc",    gcc_eip,    dwarf_eip,    LLDB_REGNUM_GENERIC_PC,    gdb_eip),
    DEFINE_GPR_i386(eflags, rflags, "flags", gcc_eflags, dwarf_eflags, LLDB_REGNUM_GENERIC_FLAGS, gdb_eflags),
    // i387 Floating point registers.
    DEFINE_FPR(fcw,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fcw),
    DEFINE_FPR(fsw,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fsw),
    DEFINE_FPR(ftw,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ftw),
    DEFINE_FPR(fop,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fop),
    DEFINE_FPR(ip,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ip),
    // FIXME: Extract segment from ip.
    DEFINE_FPR(ip,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_cs_64),
    DEFINE_FPR(dp,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_dp),
    // FIXME: Extract segment from dp.
    DEFINE_FPR(dp,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ds_64),
    DEFINE_FPR(mxcsr,     LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_mxcsr),
    DEFINE_FPR(mxcsrmask, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM),

    // FP registers.
    DEFINE_FP(stmm, 0),
    DEFINE_FP(stmm, 1),
    DEFINE_FP(stmm, 2),
    DEFINE_FP(stmm, 3),
    DEFINE_FP(stmm, 4),
    DEFINE_FP(stmm, 5),
    DEFINE_FP(stmm, 6),
    DEFINE_FP(stmm, 7),

    // XMM registers
    DEFINE_XMM(xmm, 0),
    DEFINE_XMM(xmm, 1),
    DEFINE_XMM(xmm, 2),
    DEFINE_XMM(xmm, 3),
    DEFINE_XMM(xmm, 4),
    DEFINE_XMM(xmm, 5),
    DEFINE_XMM(xmm, 6),
    DEFINE_XMM(xmm, 7),
    DEFINE_XMM(xmm, 8),
    DEFINE_XMM(xmm, 9),
    DEFINE_XMM(xmm, 10),
    DEFINE_XMM(xmm, 11),
    DEFINE_XMM(xmm, 12),
    DEFINE_XMM(xmm, 13),
    DEFINE_XMM(xmm, 14),
    DEFINE_XMM(xmm, 15),

    // Copy of YMM registers assembled from xmm and ymmh
    DEFINE_YMM(ymm, 0),
    DEFINE_YMM(ymm, 1),
    DEFINE_YMM(ymm, 2),
    DEFINE_YMM(ymm, 3),
    DEFINE_YMM(ymm, 4),
    DEFINE_YMM(ymm, 5),
    DEFINE_YMM(ymm, 6),
    DEFINE_YMM(ymm, 7),
    DEFINE_YMM(ymm, 8),
    DEFINE_YMM(ymm, 9),
    DEFINE_YMM(ymm, 10),
    DEFINE_YMM(ymm, 11),
    DEFINE_YMM(ymm, 12),
    DEFINE_YMM(ymm, 13),
    DEFINE_YMM(ymm, 14),
    DEFINE_YMM(ymm, 15)
};

RegisterInfo *RegisterContext_x86_64::m_register_infos = g_register_infos;

// True if reg is a general purpose register (they occupy the low indices).
static bool IsGPR(unsigned reg)
{
    return reg <= k_last_gpr; // GPR's come first.
}

// True if reg is one of the AVX ymm registers.
static bool IsAVX(unsigned reg)
{
    return (k_first_avx <= reg && reg <= k_last_avx);
}

// True if reg is one of the generic (x87/SSE) floating point registers.
static bool IsFPR(unsigned reg)
{
    return (k_first_fpr <= reg && reg <= k_last_fpr);
}

// True if reg lives in the floating point state for the given save format:
// with XSAVE the AVX registers also count as FPRs.
bool RegisterContext_x86_64::IsFPR(unsigned reg, FPRType fpr_type)
{
    bool generic_fpr = ::IsFPR(reg);
    if (fpr_type == eXSAVE)
        return generic_fpr || IsAVX(reg);

    return generic_fpr;
}

RegisterContext_x86_64::RegisterContext_x86_64(Thread &thread,
                                               uint32_t concrete_frame_idx)
    : RegisterContextPOSIX(thread, concrete_frame_idx)
{
    // Initialize user.iovec to point to the buffer and buffer size
    // using the conventions of Berkeley style UIO structures, as required
    // by PTRACE extensions.
    user.iovec.iov_base = &user.i387.xstate.xsave;
    user.iovec.iov_len = sizeof(user.i387.xstate.xsave);

    ::memset(&user.i387, 0, sizeof(RegisterContext_x86_64::FPR));

    // Probe for XSAVE support by attempting a read; fall back to FXSAVE.
    // TODO: Use assembly to call cpuid on the inferior and query ebx or ecx
    user.fpr_type = eXSAVE; // extended floating-point registers, if available
    if (false == ReadFPR())
        user.fpr_type = eFXSAVE; // assume generic floating-point registers
}

RegisterContext_x86_64::~RegisterContext_x86_64()
{
}

// Returns the ptrace monitor of the owning process.
// NOTE(review): static_cast assumes the process is always a ProcessPOSIX
// and that CalculateProcess() never returns an empty pointer here.
ProcessMonitor &
RegisterContext_x86_64::GetMonitor()
{
    ProcessSP base = CalculateProcess();
    ProcessPOSIX *process = static_cast<ProcessPOSIX*>(base.get());
    return process->GetMonitor();
}

// No register caching is performed by this context, so there is nothing
// to invalidate.
void
RegisterContext_x86_64::Invalidate()
{
}

void
RegisterContext_x86_64::InvalidateAllRegisters()
{
}

// Byte offset of reg within the user data area (see g_register_infos).
unsigned
RegisterContext_x86_64::GetRegisterOffset(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register number.");
    return GetRegisterInfo()[reg].byte_offset;
}

// Size in bytes of reg (see g_register_infos).
unsigned
RegisterContext_x86_64::GetRegisterSize(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register number.");
    return GetRegisterInfo()[reg].byte_size;
}

// Number of registers actually available: the AVX set is only counted
// when the XSAVE format is in use.
size_t
RegisterContext_x86_64::GetRegisterCount()
{
    size_t num_registers = k_num_gpr_registers + k_num_fpr_registers;
    if (user.fpr_type == eXSAVE)
        return num_registers + k_num_avx_registers;
    return num_registers;
}

const RegisterInfo *
RegisterContext_x86_64::GetRegisterInfo()
{
    return m_register_infos;
}

const RegisterInfo *
RegisterContext_x86_64::GetRegisterInfoAtIndex(size_t reg)
{
    if (reg < k_num_registers)
        return &GetRegisterInfo()[reg];
    else
        return NULL;
}

// Count only the register sets actually supported by the inferior.
size_t
RegisterContext_x86_64::GetRegisterSetCount()
{
    size_t sets = 0;
    for (size_t set = 0; set < k_num_register_sets; ++set)
        if (IsRegisterSetAvailable(set))
            ++sets;

    return sets;
}

const RegisterSet *
RegisterContext_x86_64::GetRegisterSet(size_t set)
{
    if (IsRegisterSetAvailable(set))
        return &g_reg_sets[set];
    else
        return NULL;
}

// Reverse lookup: linear scan of the table for a matching byte offset.
// Asserts if no register has the given offset.
unsigned
RegisterContext_x86_64::GetRegisterIndexFromOffset(unsigned offset)
{
    unsigned reg;
    for (reg = 0; reg < k_num_registers; reg++)
    {
        if (m_register_infos[reg].byte_offset == offset)
            break;
    }
    assert(reg < k_num_registers && "Invalid register offset.");
    return reg;
}

const char *
RegisterContext_x86_64::GetRegisterName(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register offset.");
    return m_register_infos[reg].name;
}

// Byte order of the inferior process, or eByteOrderInvalid when no
// process can be resolved.
lldb::ByteOrder
RegisterContext_x86_64::GetByteOrder()
{
    // Get the target process whose privileged thread was used for the register read.
    lldb::ByteOrder byte_order = eByteOrderInvalid;
    Process *process = CalculateProcess().get();

    if (process)
        byte_order = process->GetByteOrder();
    return byte_order;
}

// Parse ymm registers and into xmm.bytes and ymmh.bytes.
605bool CopyYMMtoXSTATE(uint32_t reg, RegisterContext_x86_64::UserArea &user, lldb::ByteOrder byte_order) 606{ 607 if (!IsAVX(reg)) 608 return false; 609 610 if (byte_order == eByteOrderLittle) { 611 ::memcpy(user.i387.xstate.fxsave.xmm[reg - fpu_ymm0].bytes, 612 user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes, 613 sizeof(RegisterContext_x86_64::XMMReg)); 614 ::memcpy(user.i387.xstate.xsave.ymmh[reg - fpu_ymm0].bytes, 615 user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg), 616 sizeof(RegisterContext_x86_64::YMMHReg)); 617 return true; 618 } 619 620 if (byte_order == eByteOrderBig) { 621 ::memcpy(user.i387.xstate.fxsave.xmm[reg - fpu_ymm0].bytes, 622 user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg), 623 sizeof(RegisterContext_x86_64::XMMReg)); 624 ::memcpy(user.i387.xstate.xsave.ymmh[reg - fpu_ymm0].bytes, 625 user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes, 626 sizeof(RegisterContext_x86_64::YMMHReg)); 627 return true; 628 } 629 return false; // unsupported or invalid byte order 630} 631 632// Concatenate xmm.bytes with ymmh.bytes 633bool CopyXSTATEtoYMM(uint32_t reg, RegisterContext_x86_64::UserArea &user, lldb::ByteOrder byte_order) 634{ 635 if (!IsAVX(reg)) 636 return false; 637 638 if (byte_order == eByteOrderLittle) { 639 ::memcpy(user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes, 640 user.i387.xstate.fxsave.xmm[reg - fpu_ymm0].bytes, 641 sizeof(RegisterContext_x86_64::XMMReg)); 642 ::memcpy(user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg), 643 user.i387.xstate.xsave.ymmh[reg - fpu_ymm0].bytes, 644 sizeof(RegisterContext_x86_64::YMMHReg)); 645 return true; 646 } 647 if (byte_order == eByteOrderBig) { 648 ::memcpy(user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg), 649 user.i387.xstate.fxsave.xmm[reg - fpu_ymm0].bytes, 650 sizeof(RegisterContext_x86_64::XMMReg)); 651 ::memcpy(user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes, 652 
user.i387.xstate.xsave.ymmh[reg - fpu_ymm0].bytes, 653 sizeof(RegisterContext_x86_64::YMMHReg)); 654 return true; 655 } 656 return false; // unsupported or invalid byte order 657} 658 659bool 660RegisterContext_x86_64::IsRegisterSetAvailable(size_t set_index) 661{ 662 // Note: Extended register sets are assumed to be at the end of g_reg_sets... 663 size_t num_sets = k_num_register_sets - k_num_extended_register_sets; 664 if (user.fpr_type == eXSAVE) // ...and to start with AVX registers. 665 ++num_sets; 666 667 return (set_index < num_sets); 668} 669 670bool 671RegisterContext_x86_64::ReadRegister(const RegisterInfo *reg_info, RegisterValue &value) 672{ 673 const uint32_t reg = reg_info->kinds[eRegisterKindLLDB]; 674 675 if (IsFPR(reg, user.fpr_type)) { 676 if (!ReadFPR()) 677 return false; 678 } 679 else { 680 ProcessMonitor &monitor = GetMonitor(); 681 return monitor.ReadRegisterValue(m_thread.GetID(), GetRegisterOffset(reg), GetRegisterSize(reg), value); 682 } 683 684 if (reg_info->encoding == eEncodingVector) { 685 ByteOrder byte_order = GetByteOrder(); 686 687 if (byte_order != ByteOrder::eByteOrderInvalid) { 688 if (reg >= fpu_stmm0 && reg <= fpu_stmm7) { 689 value.SetBytes(user.i387.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, reg_info->byte_size, byte_order); 690 } 691 if (reg >= fpu_xmm0 && reg <= fpu_xmm15) { 692 value.SetBytes(user.i387.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, reg_info->byte_size, byte_order); 693 } 694 if (reg >= fpu_ymm0 && reg <= fpu_ymm15) { 695 // Concatenate ymm using the register halves in xmm.bytes and ymmh.bytes 696 if (user.fpr_type == eXSAVE && CopyXSTATEtoYMM(reg, user, byte_order)) 697 value.SetBytes(user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes, reg_info->byte_size, byte_order); 698 else 699 return false; 700 } 701 return value.GetType() == RegisterValue::eTypeBytes; 702 } 703 return false; 704 } 705 706 // Note that lldb uses slightly different naming conventions from sys/user.h 707 switch (reg) 708 { 709 default: 710 return 
false; 711 case fpu_dp: 712 value = user.i387.xstate.fxsave.dp; 713 break; 714 case fpu_fcw: 715 value = user.i387.xstate.fxsave.fcw; 716 break; 717 case fpu_fsw: 718 value = user.i387.xstate.fxsave.fsw; 719 break; 720 case fpu_ip: 721 value = user.i387.xstate.fxsave.ip; 722 break; 723 case fpu_fop: 724 value = user.i387.xstate.fxsave.fop; 725 break; 726 case fpu_ftw: 727 value = user.i387.xstate.fxsave.ftw; 728 break; 729 case fpu_mxcsr: 730 value = user.i387.xstate.fxsave.mxcsr; 731 break; 732 case fpu_mxcsrmask: 733 value = user.i387.xstate.fxsave.mxcsrmask; 734 break; 735 } 736 return true; 737} 738 739bool 740RegisterContext_x86_64::ReadAllRegisterValues(DataBufferSP &data_sp) 741{ 742 bool success = false; 743 data_sp.reset (new DataBufferHeap (REG_CONTEXT_SIZE, 0)); 744 if (data_sp && ReadGPR () && ReadFPR ()) 745 { 746 uint8_t *dst = data_sp->GetBytes(); 747 success = dst != 0; 748 749 if (success) { 750 ::memcpy (dst, &user.regs, GetGPRSize()); 751 dst += GetGPRSize(); 752 } 753 if (user.fpr_type == eFXSAVE) 754 ::memcpy (dst, &user.i387.xstate.fxsave, sizeof(user.i387.xstate.fxsave)); 755 756 if (user.fpr_type == eXSAVE) { 757 ByteOrder byte_order = GetByteOrder(); 758 759 // Assemble the YMM register content from the register halves. 760 for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg) 761 success = CopyXSTATEtoYMM(reg, user, byte_order); 762 763 if (success) { 764 // Copy the extended register state including the assembled ymm registers. 
765 ::memcpy (dst, &user.i387, sizeof(user.i387)); 766 } 767 } 768 } 769 return success; 770} 771 772bool 773RegisterContext_x86_64::WriteRegister(const lldb_private::RegisterInfo *reg_info, 774 const lldb_private::RegisterValue &value) 775{ 776 const uint32_t reg = reg_info->kinds[eRegisterKindLLDB]; 777 if (IsGPR(reg)) { 778 ProcessMonitor &monitor = GetMonitor(); 779 return monitor.WriteRegisterValue(m_thread.GetID(), GetRegisterOffset(reg), value); 780 } 781 782 if (IsFPR(reg, user.fpr_type)) { 783 switch (reg) 784 { 785 default: 786 if (reg_info->encoding != eEncodingVector) 787 return false; 788 789 if (reg >= fpu_stmm0 && reg <= fpu_stmm7) 790 ::memcpy (user.i387.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, value.GetBytes(), value.GetByteSize()); 791 792 if (reg >= fpu_xmm0 && reg <= fpu_xmm15) 793 ::memcpy (user.i387.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, value.GetBytes(), value.GetByteSize()); 794 795 if (reg >= fpu_ymm0 && reg <= fpu_ymm15) { 796 if (user.fpr_type != eXSAVE) 797 return false; // the target processor does not support AVX 798 799 // Store ymm register content, and split into the register halves in xmm.bytes and ymmh.bytes 800 ::memcpy (user.i387.ymm_set.ymm[reg - fpu_ymm0].bytes, value.GetBytes(), value.GetByteSize()); 801 if (false == CopyYMMtoXSTATE(reg, user, GetByteOrder())) 802 return false; 803 } 804 break; 805 case fpu_dp: 806 user.i387.xstate.fxsave.dp = value.GetAsUInt64(); 807 break; 808 case fpu_fcw: 809 user.i387.xstate.fxsave.fcw = value.GetAsUInt16(); 810 break; 811 case fpu_fsw: 812 user.i387.xstate.fxsave.fsw = value.GetAsUInt16(); 813 break; 814 case fpu_ip: 815 user.i387.xstate.fxsave.ip = value.GetAsUInt64(); 816 break; 817 case fpu_fop: 818 user.i387.xstate.fxsave.fop = value.GetAsUInt16(); 819 break; 820 case fpu_ftw: 821 user.i387.xstate.fxsave.ftw = value.GetAsUInt16(); 822 break; 823 case fpu_mxcsr: 824 user.i387.xstate.fxsave.mxcsr = value.GetAsUInt32(); 825 break; 826 case fpu_mxcsrmask: 827 
user.i387.xstate.fxsave.mxcsrmask = value.GetAsUInt32(); 828 break; 829 } 830 if (WriteFPR()) { 831 if (IsAVX(reg)) 832 return CopyYMMtoXSTATE(reg, user, GetByteOrder()); 833 return true; 834 } 835 } 836 return false; 837} 838 839bool 840RegisterContext_x86_64::WriteAllRegisterValues(const DataBufferSP &data_sp) 841{ 842 bool success = false; 843 if (data_sp && data_sp->GetByteSize() == REG_CONTEXT_SIZE) 844 { 845 uint8_t *src = data_sp->GetBytes(); 846 if (src) { 847 ::memcpy (&user.regs, src, GetGPRSize()); 848 849 if (WriteGPR()) { 850 src += GetGPRSize(); 851 if (user.fpr_type == eFXSAVE) 852 ::memcpy (&user.i387.xstate.fxsave, src, sizeof(user.i387.xstate.fxsave)); 853 if (user.fpr_type == eXSAVE) 854 ::memcpy (&user.i387.xstate.xsave, src, sizeof(user.i387.xstate.xsave)); 855 856 success = WriteFPR(); 857 if (success) { 858 success = true; 859 860 if (user.fpr_type == eXSAVE) { 861 ByteOrder byte_order = GetByteOrder(); 862 863 // Parse the YMM register content from the register halves. 864 for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg) 865 success = CopyYMMtoXSTATE(reg, user, byte_order); 866 } 867 } 868 } 869 } 870 } 871 return success; 872} 873 874bool 875RegisterContext_x86_64::UpdateAfterBreakpoint() 876{ 877 // PC points one byte past the int3 responsible for the breakpoint. 
878 lldb::addr_t pc; 879 880 if ((pc = GetPC()) == LLDB_INVALID_ADDRESS) 881 return false; 882 883 SetPC(pc - 1); 884 return true; 885} 886 887uint32_t 888RegisterContext_x86_64::ConvertRegisterKindToRegisterNumber(uint32_t kind, 889 uint32_t num) 890{ 891 const Process *process = CalculateProcess().get(); 892 if (process) 893 { 894 const ArchSpec arch = process->GetTarget().GetArchitecture();; 895 switch (arch.GetCore()) 896 { 897 default: 898 assert(false && "CPU type not supported!"); 899 break; 900 901 case ArchSpec::eCore_x86_32_i386: 902 case ArchSpec::eCore_x86_32_i486: 903 case ArchSpec::eCore_x86_32_i486sx: 904 { 905 if (kind == eRegisterKindGeneric) 906 { 907 switch (num) 908 { 909 case LLDB_REGNUM_GENERIC_PC: return gpr_eip; 910 case LLDB_REGNUM_GENERIC_SP: return gpr_esp; 911 case LLDB_REGNUM_GENERIC_FP: return gpr_ebp; 912 case LLDB_REGNUM_GENERIC_FLAGS: return gpr_eflags; 913 case LLDB_REGNUM_GENERIC_RA: 914 default: 915 return LLDB_INVALID_REGNUM; 916 } 917 } 918 919 if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF) 920 { 921 switch (num) 922 { 923 case dwarf_eax: return gpr_eax; 924 case dwarf_edx: return gpr_edx; 925 case dwarf_ecx: return gpr_ecx; 926 case dwarf_ebx: return gpr_ebx; 927 case dwarf_esi: return gpr_esi; 928 case dwarf_edi: return gpr_edi; 929 case dwarf_ebp: return gpr_ebp; 930 case dwarf_esp: return gpr_esp; 931 case dwarf_eip: return gpr_eip; 932 case dwarf_xmm0: return fpu_xmm0; 933 case dwarf_xmm1: return fpu_xmm1; 934 case dwarf_xmm2: return fpu_xmm2; 935 case dwarf_xmm3: return fpu_xmm3; 936 case dwarf_xmm4: return fpu_xmm4; 937 case dwarf_xmm5: return fpu_xmm5; 938 case dwarf_xmm6: return fpu_xmm6; 939 case dwarf_xmm7: return fpu_xmm7; 940 case dwarf_stmm0: return fpu_stmm0; 941 case dwarf_stmm1: return fpu_stmm1; 942 case dwarf_stmm2: return fpu_stmm2; 943 case dwarf_stmm3: return fpu_stmm3; 944 case dwarf_stmm4: return fpu_stmm4; 945 case dwarf_stmm5: return fpu_stmm5; 946 case dwarf_stmm6: return fpu_stmm6; 947 
case dwarf_stmm7: return fpu_stmm7; 948 default: 949 return LLDB_INVALID_REGNUM; 950 } 951 } 952 953 if (kind == eRegisterKindGDB) 954 { 955 switch (num) 956 { 957 case gdb_eax : return gpr_eax; 958 case gdb_ebx : return gpr_ebx; 959 case gdb_ecx : return gpr_ecx; 960 case gdb_edx : return gpr_edx; 961 case gdb_esi : return gpr_esi; 962 case gdb_edi : return gpr_edi; 963 case gdb_ebp : return gpr_ebp; 964 case gdb_esp : return gpr_esp; 965 case gdb_eip : return gpr_eip; 966 case gdb_eflags : return gpr_eflags; 967 case gdb_cs : return gpr_cs; 968 case gdb_ss : return gpr_ss; 969 case gdb_ds : return gpr_ds; 970 case gdb_es : return gpr_es; 971 case gdb_fs : return gpr_fs; 972 case gdb_gs : return gpr_gs; 973 case gdb_stmm0 : return fpu_stmm0; 974 case gdb_stmm1 : return fpu_stmm1; 975 case gdb_stmm2 : return fpu_stmm2; 976 case gdb_stmm3 : return fpu_stmm3; 977 case gdb_stmm4 : return fpu_stmm4; 978 case gdb_stmm5 : return fpu_stmm5; 979 case gdb_stmm6 : return fpu_stmm6; 980 case gdb_stmm7 : return fpu_stmm7; 981 case gdb_fcw : return fpu_fcw; 982 case gdb_fsw : return fpu_fsw; 983 case gdb_ftw : return fpu_ftw; 984 case gdb_fpu_cs : return fpu_cs; 985 case gdb_ip : return fpu_ip; 986 case gdb_fpu_ds : return fpu_ds; //fpu_fos 987 case gdb_dp : return fpu_dp; //fpu_foo 988 case gdb_fop : return fpu_fop; 989 case gdb_xmm0 : return fpu_xmm0; 990 case gdb_xmm1 : return fpu_xmm1; 991 case gdb_xmm2 : return fpu_xmm2; 992 case gdb_xmm3 : return fpu_xmm3; 993 case gdb_xmm4 : return fpu_xmm4; 994 case gdb_xmm5 : return fpu_xmm5; 995 case gdb_xmm6 : return fpu_xmm6; 996 case gdb_xmm7 : return fpu_xmm7; 997 case gdb_mxcsr : return fpu_mxcsr; 998 default: 999 return LLDB_INVALID_REGNUM; 1000 } 1001 } 1002 else if (kind == eRegisterKindLLDB) 1003 { 1004 return num; 1005 } 1006 1007 break; 1008 } 1009 1010 case ArchSpec::eCore_x86_64_x86_64: 1011 { 1012 if (kind == eRegisterKindGeneric) 1013 { 1014 switch (num) 1015 { 1016 case LLDB_REGNUM_GENERIC_PC: return gpr_rip; 1017 case 
LLDB_REGNUM_GENERIC_SP: return gpr_rsp; 1018 case LLDB_REGNUM_GENERIC_FP: return gpr_rbp; 1019 case LLDB_REGNUM_GENERIC_FLAGS: return gpr_rflags; 1020 case LLDB_REGNUM_GENERIC_RA: 1021 default: 1022 return LLDB_INVALID_REGNUM; 1023 } 1024 } 1025 1026 if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF) 1027 { 1028 switch (num) 1029 { 1030 case gcc_dwarf_gpr_rax: return gpr_rax; 1031 case gcc_dwarf_gpr_rdx: return gpr_rdx; 1032 case gcc_dwarf_gpr_rcx: return gpr_rcx; 1033 case gcc_dwarf_gpr_rbx: return gpr_rbx; 1034 case gcc_dwarf_gpr_rsi: return gpr_rsi; 1035 case gcc_dwarf_gpr_rdi: return gpr_rdi; 1036 case gcc_dwarf_gpr_rbp: return gpr_rbp; 1037 case gcc_dwarf_gpr_rsp: return gpr_rsp; 1038 case gcc_dwarf_gpr_r8: return gpr_r8; 1039 case gcc_dwarf_gpr_r9: return gpr_r9; 1040 case gcc_dwarf_gpr_r10: return gpr_r10; 1041 case gcc_dwarf_gpr_r11: return gpr_r11; 1042 case gcc_dwarf_gpr_r12: return gpr_r12; 1043 case gcc_dwarf_gpr_r13: return gpr_r13; 1044 case gcc_dwarf_gpr_r14: return gpr_r14; 1045 case gcc_dwarf_gpr_r15: return gpr_r15; 1046 case gcc_dwarf_gpr_rip: return gpr_rip; 1047 case gcc_dwarf_fpu_xmm0: return fpu_xmm0; 1048 case gcc_dwarf_fpu_xmm1: return fpu_xmm1; 1049 case gcc_dwarf_fpu_xmm2: return fpu_xmm2; 1050 case gcc_dwarf_fpu_xmm3: return fpu_xmm3; 1051 case gcc_dwarf_fpu_xmm4: return fpu_xmm4; 1052 case gcc_dwarf_fpu_xmm5: return fpu_xmm5; 1053 case gcc_dwarf_fpu_xmm6: return fpu_xmm6; 1054 case gcc_dwarf_fpu_xmm7: return fpu_xmm7; 1055 case gcc_dwarf_fpu_xmm8: return fpu_xmm8; 1056 case gcc_dwarf_fpu_xmm9: return fpu_xmm9; 1057 case gcc_dwarf_fpu_xmm10: return fpu_xmm10; 1058 case gcc_dwarf_fpu_xmm11: return fpu_xmm11; 1059 case gcc_dwarf_fpu_xmm12: return fpu_xmm12; 1060 case gcc_dwarf_fpu_xmm13: return fpu_xmm13; 1061 case gcc_dwarf_fpu_xmm14: return fpu_xmm14; 1062 case gcc_dwarf_fpu_xmm15: return fpu_xmm15; 1063 case gcc_dwarf_fpu_stmm0: return fpu_stmm0; 1064 case gcc_dwarf_fpu_stmm1: return fpu_stmm1; 1065 case gcc_dwarf_fpu_stmm2: 
return fpu_stmm2; 1066 case gcc_dwarf_fpu_stmm3: return fpu_stmm3; 1067 case gcc_dwarf_fpu_stmm4: return fpu_stmm4; 1068 case gcc_dwarf_fpu_stmm5: return fpu_stmm5; 1069 case gcc_dwarf_fpu_stmm6: return fpu_stmm6; 1070 case gcc_dwarf_fpu_stmm7: return fpu_stmm7; 1071 case gcc_dwarf_fpu_ymm0: return fpu_ymm0; 1072 case gcc_dwarf_fpu_ymm1: return fpu_ymm1; 1073 case gcc_dwarf_fpu_ymm2: return fpu_ymm2; 1074 case gcc_dwarf_fpu_ymm3: return fpu_ymm3; 1075 case gcc_dwarf_fpu_ymm4: return fpu_ymm4; 1076 case gcc_dwarf_fpu_ymm5: return fpu_ymm5; 1077 case gcc_dwarf_fpu_ymm6: return fpu_ymm6; 1078 case gcc_dwarf_fpu_ymm7: return fpu_ymm7; 1079 case gcc_dwarf_fpu_ymm8: return fpu_ymm8; 1080 case gcc_dwarf_fpu_ymm9: return fpu_ymm9; 1081 case gcc_dwarf_fpu_ymm10: return fpu_ymm10; 1082 case gcc_dwarf_fpu_ymm11: return fpu_ymm11; 1083 case gcc_dwarf_fpu_ymm12: return fpu_ymm12; 1084 case gcc_dwarf_fpu_ymm13: return fpu_ymm13; 1085 case gcc_dwarf_fpu_ymm14: return fpu_ymm14; 1086 case gcc_dwarf_fpu_ymm15: return fpu_ymm15; 1087 default: 1088 return LLDB_INVALID_REGNUM; 1089 } 1090 } 1091 1092 if (kind == eRegisterKindGDB) 1093 { 1094 switch (num) 1095 { 1096 case gdb_gpr_rax : return gpr_rax; 1097 case gdb_gpr_rbx : return gpr_rbx; 1098 case gdb_gpr_rcx : return gpr_rcx; 1099 case gdb_gpr_rdx : return gpr_rdx; 1100 case gdb_gpr_rsi : return gpr_rsi; 1101 case gdb_gpr_rdi : return gpr_rdi; 1102 case gdb_gpr_rbp : return gpr_rbp; 1103 case gdb_gpr_rsp : return gpr_rsp; 1104 case gdb_gpr_r8 : return gpr_r8; 1105 case gdb_gpr_r9 : return gpr_r9; 1106 case gdb_gpr_r10 : return gpr_r10; 1107 case gdb_gpr_r11 : return gpr_r11; 1108 case gdb_gpr_r12 : return gpr_r12; 1109 case gdb_gpr_r13 : return gpr_r13; 1110 case gdb_gpr_r14 : return gpr_r14; 1111 case gdb_gpr_r15 : return gpr_r15; 1112 case gdb_gpr_rip : return gpr_rip; 1113 case gdb_gpr_rflags : return gpr_rflags; 1114 case gdb_gpr_cs : return gpr_cs; 1115 case gdb_gpr_ss : return gpr_ss; 1116 case gdb_gpr_ds : return gpr_ds; 
1117 case gdb_gpr_es : return gpr_es; 1118 case gdb_gpr_fs : return gpr_fs; 1119 case gdb_gpr_gs : return gpr_gs; 1120 case gdb_fpu_stmm0 : return fpu_stmm0; 1121 case gdb_fpu_stmm1 : return fpu_stmm1; 1122 case gdb_fpu_stmm2 : return fpu_stmm2; 1123 case gdb_fpu_stmm3 : return fpu_stmm3; 1124 case gdb_fpu_stmm4 : return fpu_stmm4; 1125 case gdb_fpu_stmm5 : return fpu_stmm5; 1126 case gdb_fpu_stmm6 : return fpu_stmm6; 1127 case gdb_fpu_stmm7 : return fpu_stmm7; 1128 case gdb_fpu_fcw : return fpu_fcw; 1129 case gdb_fpu_fsw : return fpu_fsw; 1130 case gdb_fpu_ftw : return fpu_ftw; 1131 case gdb_fpu_cs_64 : return fpu_cs; 1132 case gdb_fpu_ip : return fpu_ip; 1133 case gdb_fpu_ds_64 : return fpu_ds; 1134 case gdb_fpu_dp : return fpu_dp; 1135 case gdb_fpu_fop : return fpu_fop; 1136 case gdb_fpu_xmm0 : return fpu_xmm0; 1137 case gdb_fpu_xmm1 : return fpu_xmm1; 1138 case gdb_fpu_xmm2 : return fpu_xmm2; 1139 case gdb_fpu_xmm3 : return fpu_xmm3; 1140 case gdb_fpu_xmm4 : return fpu_xmm4; 1141 case gdb_fpu_xmm5 : return fpu_xmm5; 1142 case gdb_fpu_xmm6 : return fpu_xmm6; 1143 case gdb_fpu_xmm7 : return fpu_xmm7; 1144 case gdb_fpu_xmm8 : return fpu_xmm8; 1145 case gdb_fpu_xmm9 : return fpu_xmm9; 1146 case gdb_fpu_xmm10 : return fpu_xmm10; 1147 case gdb_fpu_xmm11 : return fpu_xmm11; 1148 case gdb_fpu_xmm12 : return fpu_xmm12; 1149 case gdb_fpu_xmm13 : return fpu_xmm13; 1150 case gdb_fpu_xmm14 : return fpu_xmm14; 1151 case gdb_fpu_xmm15 : return fpu_xmm15; 1152 case gdb_fpu_mxcsr : return fpu_mxcsr; 1153 case gdb_fpu_ymm0 : return fpu_ymm0; 1154 case gdb_fpu_ymm1 : return fpu_ymm1; 1155 case gdb_fpu_ymm2 : return fpu_ymm2; 1156 case gdb_fpu_ymm3 : return fpu_ymm3; 1157 case gdb_fpu_ymm4 : return fpu_ymm4; 1158 case gdb_fpu_ymm5 : return fpu_ymm5; 1159 case gdb_fpu_ymm6 : return fpu_ymm6; 1160 case gdb_fpu_ymm7 : return fpu_ymm7; 1161 case gdb_fpu_ymm8 : return fpu_ymm8; 1162 case gdb_fpu_ymm9 : return fpu_ymm9; 1163 case gdb_fpu_ymm10 : return fpu_ymm10; 1164 case gdb_fpu_ymm11 
: return fpu_ymm11; 1165 case gdb_fpu_ymm12 : return fpu_ymm12; 1166 case gdb_fpu_ymm13 : return fpu_ymm13; 1167 case gdb_fpu_ymm14 : return fpu_ymm14; 1168 case gdb_fpu_ymm15 : return fpu_ymm15; 1169 default: 1170 return LLDB_INVALID_REGNUM; 1171 } 1172 } 1173 else if (kind == eRegisterKindLLDB) 1174 { 1175 return num; 1176 } 1177 } 1178 } 1179 } 1180 1181 return LLDB_INVALID_REGNUM; 1182} 1183 1184bool 1185RegisterContext_x86_64::HardwareSingleStep(bool enable) 1186{ 1187 enum { TRACE_BIT = 0x100 }; 1188 uint64_t rflags; 1189 1190 if ((rflags = ReadRegisterAsUnsigned(gpr_rflags, -1UL)) == -1UL) 1191 return false; 1192 1193 if (enable) 1194 { 1195 if (rflags & TRACE_BIT) 1196 return true; 1197 1198 rflags |= TRACE_BIT; 1199 } 1200 else 1201 { 1202 if (!(rflags & TRACE_BIT)) 1203 return false; 1204 1205 rflags &= ~TRACE_BIT; 1206 } 1207 1208 return WriteRegisterFromUnsigned(gpr_rflags, rflags); 1209} 1210 1211bool 1212RegisterContext_x86_64::ReadGPR() 1213{ 1214 ProcessMonitor &monitor = GetMonitor(); 1215 return monitor.ReadGPR(m_thread.GetID(), &user.regs, GetGPRSize()); 1216} 1217 1218bool 1219RegisterContext_x86_64::ReadFPR() 1220{ 1221 ProcessMonitor &monitor = GetMonitor(); 1222 if (user.fpr_type == eFXSAVE) 1223 return monitor.ReadFPR(m_thread.GetID(), &user.i387.xstate.fxsave, sizeof(user.i387.xstate.fxsave)); 1224 1225 if (user.fpr_type == eXSAVE) 1226 return monitor.ReadRegisterSet(m_thread.GetID(), &user.iovec, sizeof(user.i387.xstate.xsave), NT_X86_XSTATE); 1227 return false; 1228} 1229 1230bool 1231RegisterContext_x86_64::WriteGPR() 1232{ 1233 ProcessMonitor &monitor = GetMonitor(); 1234 return monitor.WriteGPR(m_thread.GetID(), &user.regs, GetGPRSize()); 1235} 1236 1237bool 1238RegisterContext_x86_64::WriteFPR() 1239{ 1240 ProcessMonitor &monitor = GetMonitor(); 1241 if (user.fpr_type == eFXSAVE) 1242 return monitor.WriteFPR(m_thread.GetID(), &user.i387.xstate.fxsave, sizeof(user.i387.xstate.fxsave)); 1243 1244 if (user.fpr_type == eXSAVE) 1245 return 
monitor.WriteRegisterSet(m_thread.GetID(), &user.iovec, sizeof(user.i387.xstate.xsave), NT_X86_XSTATE); 1246 return false; 1247} 1248 1249