RegisterContext_x86_64.cpp revision 2e4c63baa530e1a1952e5e44f8f3c26580f27544
1//===-- RegisterContext_x86_64.cpp -------------------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include <cstring> 11#include <errno.h> 12#include <stdint.h> 13 14#include "lldb/Core/DataBufferHeap.h" 15#include "lldb/Core/DataExtractor.h" 16#include "lldb/Core/RegisterValue.h" 17#include "lldb/Core/Scalar.h" 18#include "lldb/Target/Target.h" 19#include "lldb/Target/Thread.h" 20#include "lldb/Host/Endian.h" 21 22#include "ProcessPOSIX.h" 23#include "ProcessMonitor.h" 24#include "RegisterContext_i386.h" 25#include "RegisterContext_x86.h" 26#include "RegisterContext_x86_64.h" 27 28using namespace lldb_private; 29using namespace lldb; 30 31// Support ptrace extensions even when compiled without required kernel support 32#ifndef NT_X86_XSTATE 33 #define NT_X86_XSTATE 0x202 34#endif 35 36enum 37{ 38 gcc_dwarf_gpr_rax = 0, 39 gcc_dwarf_gpr_rdx, 40 gcc_dwarf_gpr_rcx, 41 gcc_dwarf_gpr_rbx, 42 gcc_dwarf_gpr_rsi, 43 gcc_dwarf_gpr_rdi, 44 gcc_dwarf_gpr_rbp, 45 gcc_dwarf_gpr_rsp, 46 gcc_dwarf_gpr_r8, 47 gcc_dwarf_gpr_r9, 48 gcc_dwarf_gpr_r10, 49 gcc_dwarf_gpr_r11, 50 gcc_dwarf_gpr_r12, 51 gcc_dwarf_gpr_r13, 52 gcc_dwarf_gpr_r14, 53 gcc_dwarf_gpr_r15, 54 gcc_dwarf_gpr_rip, 55 gcc_dwarf_fpu_xmm0, 56 gcc_dwarf_fpu_xmm1, 57 gcc_dwarf_fpu_xmm2, 58 gcc_dwarf_fpu_xmm3, 59 gcc_dwarf_fpu_xmm4, 60 gcc_dwarf_fpu_xmm5, 61 gcc_dwarf_fpu_xmm6, 62 gcc_dwarf_fpu_xmm7, 63 gcc_dwarf_fpu_xmm8, 64 gcc_dwarf_fpu_xmm9, 65 gcc_dwarf_fpu_xmm10, 66 gcc_dwarf_fpu_xmm11, 67 gcc_dwarf_fpu_xmm12, 68 gcc_dwarf_fpu_xmm13, 69 gcc_dwarf_fpu_xmm14, 70 gcc_dwarf_fpu_xmm15, 71 gcc_dwarf_fpu_stmm0, 72 gcc_dwarf_fpu_stmm1, 73 gcc_dwarf_fpu_stmm2, 74 gcc_dwarf_fpu_stmm3, 75 gcc_dwarf_fpu_stmm4, 76 gcc_dwarf_fpu_stmm5, 77 gcc_dwarf_fpu_stmm6, 78 gcc_dwarf_fpu_stmm7, 79 gcc_dwarf_fpu_ymm0, 
80 gcc_dwarf_fpu_ymm1, 81 gcc_dwarf_fpu_ymm2, 82 gcc_dwarf_fpu_ymm3, 83 gcc_dwarf_fpu_ymm4, 84 gcc_dwarf_fpu_ymm5, 85 gcc_dwarf_fpu_ymm6, 86 gcc_dwarf_fpu_ymm7, 87 gcc_dwarf_fpu_ymm8, 88 gcc_dwarf_fpu_ymm9, 89 gcc_dwarf_fpu_ymm10, 90 gcc_dwarf_fpu_ymm11, 91 gcc_dwarf_fpu_ymm12, 92 gcc_dwarf_fpu_ymm13, 93 gcc_dwarf_fpu_ymm14, 94 gcc_dwarf_fpu_ymm15 95}; 96 97enum 98{ 99 gdb_gpr_rax = 0, 100 gdb_gpr_rbx = 1, 101 gdb_gpr_rcx = 2, 102 gdb_gpr_rdx = 3, 103 gdb_gpr_rsi = 4, 104 gdb_gpr_rdi = 5, 105 gdb_gpr_rbp = 6, 106 gdb_gpr_rsp = 7, 107 gdb_gpr_r8 = 8, 108 gdb_gpr_r9 = 9, 109 gdb_gpr_r10 = 10, 110 gdb_gpr_r11 = 11, 111 gdb_gpr_r12 = 12, 112 gdb_gpr_r13 = 13, 113 gdb_gpr_r14 = 14, 114 gdb_gpr_r15 = 15, 115 gdb_gpr_rip = 16, 116 gdb_gpr_rflags = 17, 117 gdb_gpr_cs = 18, 118 gdb_gpr_ss = 19, 119 gdb_gpr_ds = 20, 120 gdb_gpr_es = 21, 121 gdb_gpr_fs = 22, 122 gdb_gpr_gs = 23, 123 gdb_fpu_stmm0 = 24, 124 gdb_fpu_stmm1 = 25, 125 gdb_fpu_stmm2 = 26, 126 gdb_fpu_stmm3 = 27, 127 gdb_fpu_stmm4 = 28, 128 gdb_fpu_stmm5 = 29, 129 gdb_fpu_stmm6 = 30, 130 gdb_fpu_stmm7 = 31, 131 gdb_fpu_fcw = 32, 132 gdb_fpu_fsw = 33, 133 gdb_fpu_ftw = 34, 134 gdb_fpu_cs_64 = 35, 135 gdb_fpu_ip = 36, 136 gdb_fpu_ds_64 = 37, 137 gdb_fpu_dp = 38, 138 gdb_fpu_fop = 39, 139 gdb_fpu_xmm0 = 40, 140 gdb_fpu_xmm1 = 41, 141 gdb_fpu_xmm2 = 42, 142 gdb_fpu_xmm3 = 43, 143 gdb_fpu_xmm4 = 44, 144 gdb_fpu_xmm5 = 45, 145 gdb_fpu_xmm6 = 46, 146 gdb_fpu_xmm7 = 47, 147 gdb_fpu_xmm8 = 48, 148 gdb_fpu_xmm9 = 49, 149 gdb_fpu_xmm10 = 50, 150 gdb_fpu_xmm11 = 51, 151 gdb_fpu_xmm12 = 52, 152 gdb_fpu_xmm13 = 53, 153 gdb_fpu_xmm14 = 54, 154 gdb_fpu_xmm15 = 55, 155 gdb_fpu_mxcsr = 56, 156 gdb_fpu_ymm0 = 57, 157 gdb_fpu_ymm1 = 58, 158 gdb_fpu_ymm2 = 59, 159 gdb_fpu_ymm3 = 60, 160 gdb_fpu_ymm4 = 61, 161 gdb_fpu_ymm5 = 62, 162 gdb_fpu_ymm6 = 63, 163 gdb_fpu_ymm7 = 64, 164 gdb_fpu_ymm8 = 65, 165 gdb_fpu_ymm9 = 66, 166 gdb_fpu_ymm10 = 67, 167 gdb_fpu_ymm11 = 68, 168 gdb_fpu_ymm12 = 69, 169 gdb_fpu_ymm13 = 70, 170 gdb_fpu_ymm14 = 71, 
171 gdb_fpu_ymm15 = 72 172}; 173 174static const 175uint32_t g_gpr_regnums[k_num_gpr_registers] = 176{ 177 gpr_rax, 178 gpr_rbx, 179 gpr_rcx, 180 gpr_rdx, 181 gpr_rdi, 182 gpr_rsi, 183 gpr_rbp, 184 gpr_rsp, 185 gpr_r8, 186 gpr_r9, 187 gpr_r10, 188 gpr_r11, 189 gpr_r12, 190 gpr_r13, 191 gpr_r14, 192 gpr_r15, 193 gpr_rip, 194 gpr_rflags, 195 gpr_cs, 196 gpr_fs, 197 gpr_gs, 198 gpr_ss, 199 gpr_ds, 200 gpr_es, 201 gpr_eax, 202 gpr_ebx, 203 gpr_ecx, 204 gpr_edx, 205 gpr_edi, 206 gpr_esi, 207 gpr_ebp, 208 gpr_esp, 209 gpr_eip, 210 gpr_eflags 211}; 212 213static const uint32_t 214g_fpu_regnums[k_num_fpr_registers] = 215{ 216 fpu_fcw, 217 fpu_fsw, 218 fpu_ftw, 219 fpu_fop, 220 fpu_ip, 221 fpu_cs, 222 fpu_dp, 223 fpu_ds, 224 fpu_mxcsr, 225 fpu_mxcsrmask, 226 fpu_stmm0, 227 fpu_stmm1, 228 fpu_stmm2, 229 fpu_stmm3, 230 fpu_stmm4, 231 fpu_stmm5, 232 fpu_stmm6, 233 fpu_stmm7, 234 fpu_xmm0, 235 fpu_xmm1, 236 fpu_xmm2, 237 fpu_xmm3, 238 fpu_xmm4, 239 fpu_xmm5, 240 fpu_xmm6, 241 fpu_xmm7, 242 fpu_xmm8, 243 fpu_xmm9, 244 fpu_xmm10, 245 fpu_xmm11, 246 fpu_xmm12, 247 fpu_xmm13, 248 fpu_xmm14, 249 fpu_xmm15 250}; 251 252static const uint32_t 253g_avx_regnums[k_num_avx_registers] = 254{ 255 fpu_ymm0, 256 fpu_ymm1, 257 fpu_ymm2, 258 fpu_ymm3, 259 fpu_ymm4, 260 fpu_ymm5, 261 fpu_ymm6, 262 fpu_ymm7, 263 fpu_ymm8, 264 fpu_ymm9, 265 fpu_ymm10, 266 fpu_ymm11, 267 fpu_ymm12, 268 fpu_ymm13, 269 fpu_ymm14, 270 fpu_ymm15 271}; 272 273// Number of register sets provided by this context. 274enum 275{ 276 k_num_extended_register_sets = 1, 277 k_num_register_sets = 3 278}; 279 280static const RegisterSet 281g_reg_sets[k_num_register_sets] = 282{ 283 { "General Purpose Registers", "gpr", k_num_gpr_registers, g_gpr_regnums }, 284 { "Floating Point Registers", "fpu", k_num_fpr_registers, g_fpu_regnums }, 285 { "Advanced Vector Extensions", "avx", k_num_avx_registers, g_avx_regnums } 286}; 287 288// Computes the offset of the given FPR in the extended data area. 
#define FPR_OFFSET(regname) \
    (offsetof(RegisterContext_x86_64::FPR, xstate) + \
     offsetof(RegisterContext_x86_64::FXSAVE, regname))

// Computes the offset of the YMM register assembled from register halves.
#define YMM_OFFSET(regname) \
    (offsetof(RegisterContext_x86_64::YMM, regname))

// Number of bytes needed to represent a i386 GPR
#define GPR_i386_SIZE(reg) sizeof(((RegisterContext_i386::GPR*)NULL)->reg)

// Number of bytes needed to represent a FPR.
#define FPR_SIZE(reg) sizeof(((RegisterContext_x86_64::FXSAVE*)NULL)->reg)

// Number of bytes needed to represent the i'th FP register.
#define FP_SIZE sizeof(((RegisterContext_x86_64::MMSReg*)NULL)->bytes)

// Number of bytes needed to represent an XMM register.
#define XMM_SIZE sizeof(RegisterContext_x86_64::XMMReg)

// Number of bytes needed to represent a YMM register.
#define YMM_SIZE sizeof(RegisterContext_x86_64::YMMReg)

// Expands to a RegisterInfo initializer for a 64-bit GPR.  The five "kinds"
// slots are (gcc, dwarf, generic, gdb, lldb); the lldb number is derived by
// token-pasting gpr_<reg>.
// Note that the size and offset will be updated by platform-specific classes.
#define DEFINE_GPR(reg, alt, kind1, kind2, kind3, kind4) \
    { #reg, alt, 0, 0, eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg }, NULL, NULL }

// Dummy data for RegisterInfo::value_regs as expected by DumpRegisterSet.
static uint32_t value_regs = LLDB_INVALID_REGNUM;

// Like DEFINE_GPR, but for an i386 sub-register view of an x86_64 GPR.
// NOTE(review): the reg_x86_64 parameter is not referenced in the expansion;
// presumably the association with the parent register (and the byte offset,
// which is left 0 here) is established by platform-specific subclasses —
// confirm against the platforms that specialize this table.
#define DEFINE_GPR_i386(reg_i386, reg_x86_64, alt, kind1, kind2, kind3, kind4) \
    { #reg_i386, alt, GPR_i386_SIZE(reg_i386), 0, eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg_i386 }, &value_regs, NULL }

// RegisterInfo initializer for a scalar FPU field of the FXSAVE area.
#define DEFINE_FPR(reg, kind1, kind2, kind3, kind4) \
    { #reg, NULL, FPR_SIZE(reg), FPR_OFFSET(reg), eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, fpu_##reg }, NULL, NULL }

// RegisterInfo initializer for the i'th x87 ST/MMX register.
#define DEFINE_FP(reg, i) \
    { #reg#i, NULL, FP_SIZE, FPR_OFFSET(reg[i]), eEncodingVector, \
      eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

// RegisterInfo initializer for the i'th XMM register.
#define DEFINE_XMM(reg, i) \
    { #reg#i, NULL, XMM_SIZE, FPR_OFFSET(reg[i]), eEncodingVector, \
      eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

// RegisterInfo initializer for the i'th YMM register.  Offsets are relative
// to the YMM struct (assembled copies), not the FXSAVE area.
#define DEFINE_YMM(reg, i) \
    { #reg#i, NULL, YMM_SIZE, YMM_OFFSET(reg[i]), eEncodingVector, \
      eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

// RegisterInfo initializer for a debug register (internal use only: every
// register-kind slot is LLDB_INVALID_REGNUM, so it is not user addressable).
#define DEFINE_DR(reg, i) \
    { #reg#i, NULL, 0, 0, eEncodingUint, eFormatHex, \
      { LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, \
        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM }, NULL, NULL }

// Total size of the buffer handed out by Read/WriteAllRegisterValues.
#define REG_CONTEXT_SIZE (GetGPRSize() + sizeof(RegisterContext_x86_64::FPR))

// Master register description table.  Entry order must match the lldb
// register numbering (gpr_* / fpu_* / dr constants), since the lldb number
// is used to index this array.
static RegisterInfo
g_register_infos[k_num_registers] =
{
    // General purpose registers.
    DEFINE_GPR(rax,    NULL,    gcc_dwarf_gpr_rax,   gcc_dwarf_gpr_rax,   LLDB_INVALID_REGNUM,       gdb_gpr_rax),
    DEFINE_GPR(rbx,    NULL,    gcc_dwarf_gpr_rbx,   gcc_dwarf_gpr_rbx,   LLDB_INVALID_REGNUM,       gdb_gpr_rbx),
    DEFINE_GPR(rcx,    NULL,    gcc_dwarf_gpr_rcx,   gcc_dwarf_gpr_rcx,   LLDB_INVALID_REGNUM,       gdb_gpr_rcx),
    DEFINE_GPR(rdx,    NULL,    gcc_dwarf_gpr_rdx,   gcc_dwarf_gpr_rdx,   LLDB_INVALID_REGNUM,       gdb_gpr_rdx),
    DEFINE_GPR(rdi,    NULL,    gcc_dwarf_gpr_rdi,   gcc_dwarf_gpr_rdi,   LLDB_INVALID_REGNUM,       gdb_gpr_rdi),
    DEFINE_GPR(rsi,    NULL,    gcc_dwarf_gpr_rsi,   gcc_dwarf_gpr_rsi,   LLDB_INVALID_REGNUM,       gdb_gpr_rsi),
    DEFINE_GPR(rbp,    "fp",    gcc_dwarf_gpr_rbp,   gcc_dwarf_gpr_rbp,   LLDB_REGNUM_GENERIC_FP,    gdb_gpr_rbp),
    DEFINE_GPR(rsp,    "sp",    gcc_dwarf_gpr_rsp,   gcc_dwarf_gpr_rsp,   LLDB_REGNUM_GENERIC_SP,    gdb_gpr_rsp),
    DEFINE_GPR(r8,     NULL,    gcc_dwarf_gpr_r8,    gcc_dwarf_gpr_r8,    LLDB_INVALID_REGNUM,       gdb_gpr_r8),
    DEFINE_GPR(r9,     NULL,    gcc_dwarf_gpr_r9,    gcc_dwarf_gpr_r9,    LLDB_INVALID_REGNUM,       gdb_gpr_r9),
    DEFINE_GPR(r10,    NULL,    gcc_dwarf_gpr_r10,   gcc_dwarf_gpr_r10,   LLDB_INVALID_REGNUM,       gdb_gpr_r10),
    DEFINE_GPR(r11,    NULL,    gcc_dwarf_gpr_r11,   gcc_dwarf_gpr_r11,   LLDB_INVALID_REGNUM,       gdb_gpr_r11),
    DEFINE_GPR(r12,    NULL,    gcc_dwarf_gpr_r12,   gcc_dwarf_gpr_r12,   LLDB_INVALID_REGNUM,       gdb_gpr_r12),
    DEFINE_GPR(r13,    NULL,    gcc_dwarf_gpr_r13,   gcc_dwarf_gpr_r13,   LLDB_INVALID_REGNUM,       gdb_gpr_r13),
    DEFINE_GPR(r14,    NULL,    gcc_dwarf_gpr_r14,   gcc_dwarf_gpr_r14,   LLDB_INVALID_REGNUM,       gdb_gpr_r14),
    DEFINE_GPR(r15,    NULL,    gcc_dwarf_gpr_r15,   gcc_dwarf_gpr_r15,   LLDB_INVALID_REGNUM,       gdb_gpr_r15),
    DEFINE_GPR(rip,    "pc",    gcc_dwarf_gpr_rip,   gcc_dwarf_gpr_rip,   LLDB_REGNUM_GENERIC_PC,    gdb_gpr_rip),
    DEFINE_GPR(rflags, "flags", LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_REGNUM_GENERIC_FLAGS, gdb_gpr_rflags),
    DEFINE_GPR(cs,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_cs),
    DEFINE_GPR(fs,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_fs),
    DEFINE_GPR(gs,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_gs),
    DEFINE_GPR(ss,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_ss),
    DEFINE_GPR(ds,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_ds),
    DEFINE_GPR(es,     NULL,    LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,       gdb_gpr_es),
    // i386 registers
    DEFINE_GPR_i386(eax,    rax,    NULL,    gcc_eax,    dwarf_eax,    LLDB_INVALID_REGNUM,       gdb_eax),
    DEFINE_GPR_i386(ebx,    rbx,    NULL,    gcc_ebx,    dwarf_ebx,    LLDB_INVALID_REGNUM,       gdb_ebx),
    DEFINE_GPR_i386(ecx,    rcx,    NULL,    gcc_ecx,    dwarf_ecx,    LLDB_INVALID_REGNUM,       gdb_ecx),
    DEFINE_GPR_i386(edx,    rdx,    NULL,    gcc_edx,    dwarf_edx,    LLDB_INVALID_REGNUM,       gdb_edx),
    DEFINE_GPR_i386(edi,    rdi,    NULL,    gcc_edi,    dwarf_edi,    LLDB_INVALID_REGNUM,       gdb_edi),
    DEFINE_GPR_i386(esi,    rsi,    NULL,    gcc_esi,    dwarf_esi,    LLDB_INVALID_REGNUM,       gdb_esi),
    DEFINE_GPR_i386(ebp,    rbp,    "fp",    gcc_ebp,    dwarf_ebp,    LLDB_REGNUM_GENERIC_FP,    gdb_ebp),
    DEFINE_GPR_i386(esp,    rsp,    "sp",    gcc_esp,    dwarf_esp,    LLDB_REGNUM_GENERIC_SP,    gdb_esp),
    DEFINE_GPR_i386(eip,    rip,    "pc",    gcc_eip,    dwarf_eip,    LLDB_REGNUM_GENERIC_PC,    gdb_eip),
    DEFINE_GPR_i386(eflags, rflags, "flags", gcc_eflags, dwarf_eflags, LLDB_REGNUM_GENERIC_FLAGS, gdb_eflags),
    // i387 Floating point registers.
    DEFINE_FPR(fcw,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fcw),
    DEFINE_FPR(fsw,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fsw),
    DEFINE_FPR(ftw,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ftw),
    DEFINE_FPR(fop,       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fop),
    DEFINE_FPR(ip,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ip),
    // FIXME: Extract segment from ip.
    // NOTE(review): this is a second entry backed by the same FXSAVE 'ip'
    // field, exposed under the gdb cs_64 number, until the FIXME is resolved.
    DEFINE_FPR(ip,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_cs_64),
    DEFINE_FPR(dp,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_dp),
    // FIXME: Extract segment from dp.
    // NOTE(review): same pattern as above, 'dp' doubling for the ds_64 slot.
    DEFINE_FPR(dp,        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ds_64),
    DEFINE_FPR(mxcsr,     LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_mxcsr),
    DEFINE_FPR(mxcsrmask, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM),

    // FP registers.
    DEFINE_FP(stmm, 0),
    DEFINE_FP(stmm, 1),
    DEFINE_FP(stmm, 2),
    DEFINE_FP(stmm, 3),
    DEFINE_FP(stmm, 4),
    DEFINE_FP(stmm, 5),
    DEFINE_FP(stmm, 6),
    DEFINE_FP(stmm, 7),

    // XMM registers
    DEFINE_XMM(xmm, 0),
    DEFINE_XMM(xmm, 1),
    DEFINE_XMM(xmm, 2),
    DEFINE_XMM(xmm, 3),
    DEFINE_XMM(xmm, 4),
    DEFINE_XMM(xmm, 5),
    DEFINE_XMM(xmm, 6),
    DEFINE_XMM(xmm, 7),
    DEFINE_XMM(xmm, 8),
    DEFINE_XMM(xmm, 9),
    DEFINE_XMM(xmm, 10),
    DEFINE_XMM(xmm, 11),
    DEFINE_XMM(xmm, 12),
    DEFINE_XMM(xmm, 13),
    DEFINE_XMM(xmm, 14),
    DEFINE_XMM(xmm, 15),

    // Copy of YMM registers assembled from xmm and ymmh
    DEFINE_YMM(ymm, 0),
    DEFINE_YMM(ymm, 1),
    DEFINE_YMM(ymm, 2),
    DEFINE_YMM(ymm, 3),
    DEFINE_YMM(ymm, 4),
    DEFINE_YMM(ymm, 5),
    DEFINE_YMM(ymm, 6),
    DEFINE_YMM(ymm, 7),
    DEFINE_YMM(ymm, 8),
    DEFINE_YMM(ymm, 9),
    DEFINE_YMM(ymm, 10),
    DEFINE_YMM(ymm, 11),
    DEFINE_YMM(ymm, 12),
    DEFINE_YMM(ymm, 13),
    DEFINE_YMM(ymm, 14),
    DEFINE_YMM(ymm, 15),

    // Debug registers for lldb internal use
    DEFINE_DR(dr, 0),
    DEFINE_DR(dr, 1),
    DEFINE_DR(dr, 2),
    DEFINE_DR(dr, 3),
    DEFINE_DR(dr, 4),
    DEFINE_DR(dr, 5),
    DEFINE_DR(dr, 6),
    DEFINE_DR(dr, 7)
};

// True when 'reg' (an lldb register number) denotes a general purpose
// register, relying on GPRs occupying the low range of the numbering.
static bool IsGPR(unsigned reg)
{
    return reg <= k_last_gpr; // GPR's come first.
}

// True when 'reg' denotes one of the AVX (ymm) registers.
static bool IsAVX(unsigned reg)
{
    return (k_first_avx <= reg && reg <= k_last_avx);
}

// True when 'reg' denotes a classic (non-AVX) floating point register.
static bool IsFPR(unsigned reg)
{
    return (k_first_fpr <= reg && reg <= k_last_fpr);
}


// True when 'reg' is served from the floating-point register block; with
// eXSAVE in use, the AVX registers also live there.
bool RegisterContext_x86_64::IsFPR(unsigned reg, FPRType fpr_type)
{
    bool generic_fpr = ::IsFPR(reg);
    if (fpr_type == eXSAVE)
        return generic_fpr || IsAVX(reg);

    return generic_fpr;
}

RegisterContext_x86_64::RegisterContext_x86_64(Thread &thread,
                                               uint32_t concrete_frame_idx)
    : RegisterContextPOSIX(thread, concrete_frame_idx)
{
    // Initialize m_iovec to point to the buffer and buffer size
    // using the conventions of Berkeley style UIO structures, as required
    // by PTRACE extensions.
    m_iovec.iov_base = &m_fpr.xstate.xsave;
    m_iovec.iov_len = sizeof(m_fpr.xstate.xsave);

    // Zero the whole FPR block before the first read.
    ::memset(&m_fpr, 0, sizeof(RegisterContext_x86_64::FPR));

    // Probe for XSAVE support by attempting a read; fall back to FXSAVE.
    // TODO: Use assembly to call cpuid on the inferior and query ebx or ecx
    m_fpr_type = eXSAVE; // extended floating-point registers, if available
    if (false == ReadFPR())
        m_fpr_type = eFXSAVE; // assume generic floating-point registers
}

RegisterContext_x86_64::~RegisterContext_x86_64()
{
}

// Returns the ptrace monitor of the owning POSIX process.
ProcessMonitor &
RegisterContext_x86_64::GetMonitor()
{
    ProcessSP base = CalculateProcess();
    ProcessPOSIX *process = static_cast<ProcessPOSIX*>(base.get());
    return process->GetMonitor();
}

// No caching is done by this context, so invalidation is a no-op.
void
RegisterContext_x86_64::Invalidate()
{
}

// No caching is done by this context, so invalidation is a no-op.
void
RegisterContext_x86_64::InvalidateAllRegisters()
{
}

// Byte offset of 'reg' within the register data (see RegisterInfo table).
unsigned
RegisterContext_x86_64::GetRegisterOffset(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register number.");
    return GetRegisterInfo()[reg].byte_offset;
}

// Size in bytes of 'reg' (see RegisterInfo table).
unsigned
RegisterContext_x86_64::GetRegisterSize(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register number.");
    return GetRegisterInfo()[reg].byte_size;
}

// Number of registers this context exposes; the AVX set is only counted
// when the target supports XSAVE.
size_t
RegisterContext_x86_64::GetRegisterCount()
{
    size_t num_registers = k_num_gpr_registers + k_num_fpr_registers;
    if (m_fpr_type == eXSAVE)
        return num_registers + k_num_avx_registers;
    return num_registers;
}

const RegisterInfo *
RegisterContext_x86_64::GetRegisterInfo()
{
    // Commonly, this method is overridden and g_register_infos is copied and specialized.
    // So, use GetRegisterInfo() rather than g_register_infos in this scope.
    return g_register_infos;
}

// Returns the RegisterInfo for an lldb register number, or NULL when out of range.
const RegisterInfo *
RegisterContext_x86_64::GetRegisterInfoAtIndex(size_t reg)
{
    if (reg < k_num_registers)
        return &GetRegisterInfo()[reg];
    else
        return NULL;
}

// Counts only the register sets actually available on this target.
size_t
RegisterContext_x86_64::GetRegisterSetCount()
{
    size_t sets = 0;
    for (size_t set = 0; set < k_num_register_sets; ++set)
        if (IsRegisterSetAvailable(set))
            ++sets;

    return sets;
}

// Returns the set description, or NULL when the set is unavailable here.
const RegisterSet *
RegisterContext_x86_64::GetRegisterSet(size_t set)
{
    if (IsRegisterSetAvailable(set))
        return &g_reg_sets[set];
    else
        return NULL;
}

// Linear search of the RegisterInfo table by byte offset; asserts when no
// register has the given offset.
// NOTE(review): if several registers share an offset (e.g. aliased i386
// sub-registers), the first match wins — verify callers expect that.
unsigned
RegisterContext_x86_64::GetRegisterIndexFromOffset(unsigned offset)
{
    unsigned reg;
    for (reg = 0; reg < k_num_registers; reg++)
    {
        if (GetRegisterInfo()[reg].byte_offset == offset)
            break;
    }
    assert(reg < k_num_registers && "Invalid register offset.");
    return reg;
}

const char *
RegisterContext_x86_64::GetRegisterName(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register offset.");
    return GetRegisterInfo()[reg].name;
}

lldb::ByteOrder
RegisterContext_x86_64::GetByteOrder()
{
    // Get the target process whose privileged thread was used for the register read.
    lldb::ByteOrder byte_order = eByteOrderInvalid;
    Process *process = CalculateProcess().get();

    if (process)
        byte_order = process->GetByteOrder();
    return byte_order;
}

// Parse ymm registers and into xmm.bytes and ymmh.bytes.
// Splits the assembled ymm copy in m_ymm_set back into the xmm (low half)
// and ymmh (high half) storage of the XSTATE area.  Returns false for
// non-AVX registers or an unknown byte order.
bool RegisterContext_x86_64::CopyYMMtoXSTATE(uint32_t reg, lldb::ByteOrder byte_order)
{
    if (!IsAVX(reg))
        return false;

    if (byte_order == eByteOrderLittle) {
        // Little endian: low 16 bytes are the xmm half.
        ::memcpy(m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }

    if (byte_order == eByteOrderBig) {
        // Big endian: the halves are swapped relative to little endian.
        ::memcpy(m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }
    return false; // unsupported or invalid byte order
}

// Concatenate xmm.bytes with ymmh.bytes
// Inverse of CopyYMMtoXSTATE: assembles a full ymm value in m_ymm_set from
// the xmm/ymmh halves of the XSTATE area.
bool RegisterContext_x86_64::CopyXSTATEtoYMM(uint32_t reg, lldb::ByteOrder byte_order)
{
    if (!IsAVX(reg))
        return false;

    if (byte_order == eByteOrderLittle) {
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }
    if (byte_order == eByteOrderBig) {
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }
    return false; // unsupported or invalid byte order
}

// The AVX set (last in g_reg_sets) is only available when XSAVE is in use.
bool
RegisterContext_x86_64::IsRegisterSetAvailable(size_t set_index)
{
    // Note: Extended register sets are assumed to be at the end of g_reg_sets...
    size_t num_sets = k_num_register_sets - k_num_extended_register_sets;
    if (m_fpr_type == eXSAVE) // ...and to start with AVX registers.
        ++num_sets;

    return (set_index < num_sets);
}

// Reads a single register: FP/AVX registers come from the locally cached
// FPR block (refreshed via ReadFPR), everything else via the ptrace monitor.
bool
RegisterContext_x86_64::ReadRegister(const RegisterInfo *reg_info, RegisterValue &value)
{
    if (!reg_info)
        return false;

    const uint32_t reg = reg_info->kinds[eRegisterKindLLDB];

    if (IsFPR(reg, m_fpr_type)) {
        // Refresh the cached FPR block before serving the value below.
        if (!ReadFPR())
            return false;
    }
    else {
        ProcessMonitor &monitor = GetMonitor();
        bool success = monitor.ReadRegisterValue(m_thread.GetID(), GetRegisterOffset(reg),
                                                 GetRegisterName(reg), GetRegisterSize(reg), value);

        // If an i386 register should be parsed from an x86_64 register...
        if (success && reg >= k_first_i386 && reg <= k_last_i386)
            if (value.GetByteSize() > reg_info->byte_size)
                value.SetType(reg_info); // ...use the type specified by reg_info rather than the uint64_t default
        return success;
    }

    // Vector-encoded registers (stmm/xmm/ymm) are copied out of the cache.
    if (reg_info->encoding == eEncodingVector) {
        ByteOrder byte_order = GetByteOrder();

        if (byte_order != ByteOrder::eByteOrderInvalid) {
            if (reg >= fpu_stmm0 && reg <= fpu_stmm7) {
                value.SetBytes(m_fpr.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, reg_info->byte_size, byte_order);
            }
            if (reg >= fpu_xmm0 && reg <= fpu_xmm15) {
                value.SetBytes(m_fpr.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, reg_info->byte_size, byte_order);
            }
            if (reg >= fpu_ymm0 && reg <= fpu_ymm15) {
                // Concatenate ymm using the register halves in xmm.bytes and ymmh.bytes
                if (m_fpr_type == eXSAVE && CopyXSTATEtoYMM(reg, byte_order))
                    value.SetBytes(m_ymm_set.ymm[reg - fpu_ymm0].bytes, reg_info->byte_size, byte_order);
                else
                    return false;
            }
            return value.GetType() == RegisterValue::eTypeBytes;
        }
        return false;
    }

    // Scalar FPU fields served straight from the cached FXSAVE image.
    // Note that lldb uses slightly different naming conventions from sys/user.h
    switch (reg)
    {
    default:
        return false;
    case fpu_dp:
        value = m_fpr.xstate.fxsave.dp;
        break;
    case fpu_fcw:
        value = m_fpr.xstate.fxsave.fcw;
        break;
    case fpu_fsw:
        value = m_fpr.xstate.fxsave.fsw;
        break;
    case fpu_ip:
        value = m_fpr.xstate.fxsave.ip;
        break;
    case fpu_fop:
        value = m_fpr.xstate.fxsave.fop;
        break;
    case fpu_ftw:
        value = m_fpr.xstate.fxsave.ftw;
        break;
    case fpu_mxcsr:
        value = m_fpr.xstate.fxsave.mxcsr;
        break;
    case fpu_mxcsrmask:
        value = m_fpr.xstate.fxsave.mxcsrmask;
        break;
    }
    return true;
}

// Serializes GPRs followed by the FPR block (FXSAVE or full XSTATE) into a
// freshly allocated buffer of REG_CONTEXT_SIZE bytes.
bool
RegisterContext_x86_64::ReadAllRegisterValues(DataBufferSP &data_sp)
{
    bool success = false;
    data_sp.reset (new DataBufferHeap (REG_CONTEXT_SIZE, 0));
    if (data_sp && ReadGPR () && ReadFPR ())
    {
        uint8_t *dst = data_sp->GetBytes();
        success = dst != 0;

        if (success) {
            ::memcpy (dst, &m_gpr, GetGPRSize());
            dst += GetGPRSize();
        }
        // NOTE(review): the two FPR copies below are not guarded by
        // 'success'; if GetBytes() ever returned NULL these memcpys would
        // write through a null/unadvanced dst — confirm GetBytes() cannot
        // return NULL after a successful allocation.
        if (m_fpr_type == eFXSAVE)
            ::memcpy (dst, &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave));

        if (m_fpr_type == eXSAVE) {
            ByteOrder byte_order = GetByteOrder();

            // Assemble the YMM register content from the register halves.
            for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg)
                success = CopyXSTATEtoYMM(reg, byte_order);

            if (success) {
                // Copy the extended register state including the assembled ymm registers.
                ::memcpy (dst, &m_fpr, sizeof(m_fpr));
            }
        }
    }
    return success;
}

// Writes a single register: GPRs go straight to the inferior via ptrace;
// FP/AVX registers are patched into the cached FPR block and flushed with
// WriteFPR().
bool
RegisterContext_x86_64::WriteRegister(const lldb_private::RegisterInfo *reg_info,
                                      const lldb_private::RegisterValue &value)
{
    const uint32_t reg = reg_info->kinds[eRegisterKindLLDB];
    if (IsGPR(reg)) {
        ProcessMonitor &monitor = GetMonitor();
        return monitor.WriteRegisterValue(m_thread.GetID(), GetRegisterOffset(reg), GetRegisterName(reg), value);
    }

    if (IsFPR(reg, m_fpr_type)) {
        switch (reg)
        {
        default:
            // Vector registers (stmm/xmm/ymm) fall into the default case.
            if (reg_info->encoding != eEncodingVector)
                return false;

            if (reg >= fpu_stmm0 && reg <= fpu_stmm7)
                ::memcpy (m_fpr.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, value.GetBytes(), value.GetByteSize());

            if (reg >= fpu_xmm0 && reg <= fpu_xmm15)
                ::memcpy (m_fpr.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, value.GetBytes(), value.GetByteSize());

            if (reg >= fpu_ymm0 && reg <= fpu_ymm15) {
                if (m_fpr_type != eXSAVE)
                    return false; // the target processor does not support AVX

                // Store ymm register content, and split into the register halves in xmm.bytes and ymmh.bytes
                ::memcpy (m_ymm_set.ymm[reg - fpu_ymm0].bytes, value.GetBytes(), value.GetByteSize());
                if (false == CopyYMMtoXSTATE(reg, GetByteOrder()))
                    return false;
            }
            break;
        case fpu_dp:
            m_fpr.xstate.fxsave.dp = value.GetAsUInt64();
            break;
        case fpu_fcw:
            m_fpr.xstate.fxsave.fcw = value.GetAsUInt16();
            break;
        case fpu_fsw:
            m_fpr.xstate.fxsave.fsw = value.GetAsUInt16();
            break;
        case fpu_ip:
            m_fpr.xstate.fxsave.ip = value.GetAsUInt64();
            break;
        case fpu_fop:
            m_fpr.xstate.fxsave.fop = value.GetAsUInt16();
            break;
        case fpu_ftw:
            m_fpr.xstate.fxsave.ftw = value.GetAsUInt16();
            break;
        case fpu_mxcsr:
            m_fpr.xstate.fxsave.mxcsr = value.GetAsUInt32();
            break;
        case fpu_mxcsrmask:
            m_fpr.xstate.fxsave.mxcsrmask = value.GetAsUInt32();
            break;
        }
        // Flush the whole FPR block back to the inferior.
        if (WriteFPR()) {
            if (IsAVX(reg))
                return CopyYMMtoXSTATE(reg, GetByteOrder());
            return true;
        }
    }
    return false;
}

// Restores all registers from a buffer produced by ReadAllRegisterValues.
bool
RegisterContext_x86_64::WriteAllRegisterValues(const DataBufferSP &data_sp)
{
    bool success = false;
    if (data_sp && data_sp->GetByteSize() == REG_CONTEXT_SIZE)
    {
        uint8_t *src = data_sp->GetBytes();
        if (src) {
            ::memcpy (&m_gpr, src, GetGPRSize());

            if (WriteGPR()) {
                src += GetGPRSize();
                if (m_fpr_type == eFXSAVE)
                    ::memcpy (&m_fpr.xstate.fxsave, src, sizeof(m_fpr.xstate.fxsave));
                if (m_fpr_type == eXSAVE)
                    ::memcpy (&m_fpr.xstate.xsave, src, sizeof(m_fpr.xstate.xsave));

                success = WriteFPR();
                if (success) {
                    // NOTE(review): this assignment is redundant (success is
                    // already true here).
                    success = true;

                    if (m_fpr_type == eXSAVE) {
                        ByteOrder byte_order = GetByteOrder();

                        // Parse the YMM register content from the register halves.
                        for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg)
                            success = CopyYMMtoXSTATE(reg, byte_order);
                    }
                }
            }
        }
    }
    return success;
}

// Raw read of a register by lldb number, bypassing the FPR cache path of the
// RegisterInfo overload above.
bool
RegisterContext_x86_64::ReadRegister(const unsigned reg,
                                     RegisterValue &value)
{
    ProcessMonitor &monitor = GetMonitor();
    return monitor.ReadRegisterValue(m_thread.GetID(),
                                     GetRegisterOffset(reg),
                                     GetRegisterName(reg),
                                     GetRegisterSize(reg),
                                     value);
}

// Raw write of a register by lldb number via the ptrace monitor.
bool
RegisterContext_x86_64::WriteRegister(const unsigned reg,
                                      const RegisterValue &value)
{
    ProcessMonitor &monitor = GetMonitor();
    return monitor.WriteRegisterValue(m_thread.GetID(),
                                      GetRegisterOffset(reg),
                                      GetRegisterName(reg),
                                      value);
}

// Rewinds the PC over the int3 byte so the breakpoint address is reported.
bool
RegisterContext_x86_64::UpdateAfterBreakpoint()
{
    // PC points one byte past the int3 responsible for the breakpoint.
    lldb::addr_t pc;

    if ((pc = GetPC()) == LLDB_INVALID_ADDRESS)
        return false;

    SetPC(pc - 1);
    return true;
}

// Maps a (kind, number) pair to this context's lldb register number, keyed
// off the target's architecture (i386 vs x86_64 cores).
uint32_t
RegisterContext_x86_64::ConvertRegisterKindToRegisterNumber(uint32_t kind,
                                                            uint32_t num)
{
    const Process *process = CalculateProcess().get();
    if (process)
    {
        const ArchSpec arch = process->GetTarget().GetArchitecture();;
        switch (arch.GetCore())
        {
        default:
            assert(false && "CPU type not supported!");
            break;

        case ArchSpec::eCore_x86_32_i386:
        case ArchSpec::eCore_x86_32_i486:
        case ArchSpec::eCore_x86_32_i486sx:
        {
            if (kind == eRegisterKindGeneric)
            {
                switch (num)
                {
                case LLDB_REGNUM_GENERIC_PC:    return gpr_eip;
                case LLDB_REGNUM_GENERIC_SP:    return gpr_esp;
                case LLDB_REGNUM_GENERIC_FP:    return gpr_ebp;
                case LLDB_REGNUM_GENERIC_FLAGS: return gpr_eflags;
                case LLDB_REGNUM_GENERIC_RA:
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }

            if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF)
            {
                switch (num)
                {
                case dwarf_eax:   return gpr_eax;
                case dwarf_edx:   return gpr_edx;
                case dwarf_ecx:   return gpr_ecx;
                case dwarf_ebx:   return gpr_ebx;
                case dwarf_esi:   return gpr_esi;
                case dwarf_edi:   return gpr_edi;
                case dwarf_ebp:   return gpr_ebp;
                case dwarf_esp:   return gpr_esp;
                case dwarf_eip:   return gpr_eip;
                case dwarf_xmm0:  return fpu_xmm0;
                case dwarf_xmm1:  return fpu_xmm1;
                case dwarf_xmm2:  return fpu_xmm2;
                case dwarf_xmm3:  return fpu_xmm3;
                case dwarf_xmm4:  return fpu_xmm4;
                case dwarf_xmm5:  return fpu_xmm5;
                case dwarf_xmm6:  return fpu_xmm6;
                case dwarf_xmm7:  return fpu_xmm7;
                case dwarf_stmm0: return fpu_stmm0;
                case dwarf_stmm1: return fpu_stmm1;
                case dwarf_stmm2: return fpu_stmm2;
                case dwarf_stmm3: return fpu_stmm3;
                case dwarf_stmm4: return fpu_stmm4;
                case dwarf_stmm5: return fpu_stmm5;
                case dwarf_stmm6: return fpu_stmm6;
                case dwarf_stmm7: return fpu_stmm7;
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }

            if (kind == eRegisterKindGDB)
            {
                switch (num)
                {
                case gdb_eax    : return gpr_eax;
                case gdb_ebx    : return gpr_ebx;
                case gdb_ecx    : return gpr_ecx;
                case gdb_edx    : return gpr_edx;
                case gdb_esi    : return gpr_esi;
                case gdb_edi    : return gpr_edi;
                case gdb_ebp    : return gpr_ebp;
                case gdb_esp    : return gpr_esp;
                case gdb_eip    : return gpr_eip;
                case gdb_eflags : return gpr_eflags;
                case gdb_cs     : return gpr_cs;
                case gdb_ss     : return gpr_ss;
                case gdb_ds     : return gpr_ds;
                case gdb_es     : return gpr_es;
                case gdb_fs     : return gpr_fs;
                case gdb_gs     : return gpr_gs;
                case gdb_stmm0  : return fpu_stmm0;
                case gdb_stmm1  : return fpu_stmm1;
                case gdb_stmm2  : return fpu_stmm2;
                case gdb_stmm3  : return fpu_stmm3;
                case gdb_stmm4  : return fpu_stmm4;
                case gdb_stmm5  : return fpu_stmm5;
                case gdb_stmm6  : return fpu_stmm6;
                case gdb_stmm7  : return fpu_stmm7;
                case gdb_fcw    : return fpu_fcw;
                case gdb_fsw    : return fpu_fsw;
                case gdb_ftw    : return fpu_ftw;
                case gdb_fpu_cs : return fpu_cs;
                case gdb_ip     : return fpu_ip;
                case gdb_fpu_ds : return fpu_ds; //fpu_fos
                case gdb_dp     : return fpu_dp; //fpu_foo
                case gdb_fop    : return fpu_fop;
                case gdb_xmm0   : return fpu_xmm0;
                case gdb_xmm1   : return fpu_xmm1;
                case gdb_xmm2   : return fpu_xmm2;
                case gdb_xmm3   : return fpu_xmm3;
                case gdb_xmm4   : return fpu_xmm4;
                case gdb_xmm5   : return fpu_xmm5;
                case gdb_xmm6   : return fpu_xmm6;
                case gdb_xmm7   : return fpu_xmm7;
                case gdb_mxcsr  : return fpu_mxcsr;
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }
            else if (kind == eRegisterKindLLDB)
            {
                return num;
            }

            break;
        }

        case ArchSpec::eCore_x86_64_x86_64:
        {
            if (kind == eRegisterKindGeneric)
            {
                switch (num)
                {
                case LLDB_REGNUM_GENERIC_PC:    return gpr_rip;
                case LLDB_REGNUM_GENERIC_SP:    return gpr_rsp;
                case LLDB_REGNUM_GENERIC_FP:    return gpr_rbp;
                case LLDB_REGNUM_GENERIC_FLAGS: return gpr_rflags;
                case LLDB_REGNUM_GENERIC_RA:
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }

            if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF)
            {
                switch (num)
                {
                case gcc_dwarf_gpr_rax:   return gpr_rax;
                case gcc_dwarf_gpr_rdx:   return gpr_rdx;
                case gcc_dwarf_gpr_rcx:   return gpr_rcx;
                case gcc_dwarf_gpr_rbx:   return gpr_rbx;
                case gcc_dwarf_gpr_rsi:   return gpr_rsi;
                case gcc_dwarf_gpr_rdi:   return gpr_rdi;
                case gcc_dwarf_gpr_rbp:   return gpr_rbp;
                case gcc_dwarf_gpr_rsp:   return gpr_rsp;
                case gcc_dwarf_gpr_r8:    return gpr_r8;
                case gcc_dwarf_gpr_r9:    return gpr_r9;
                case gcc_dwarf_gpr_r10:   return gpr_r10;
                case gcc_dwarf_gpr_r11:   return gpr_r11;
                case gcc_dwarf_gpr_r12:   return gpr_r12;
                case gcc_dwarf_gpr_r13:   return gpr_r13;
                case gcc_dwarf_gpr_r14:   return gpr_r14;
                case gcc_dwarf_gpr_r15:   return gpr_r15;
                case gcc_dwarf_gpr_rip:   return gpr_rip;
                case gcc_dwarf_fpu_xmm0:  return fpu_xmm0;
                case gcc_dwarf_fpu_xmm1:  return fpu_xmm1;
                case gcc_dwarf_fpu_xmm2:  return fpu_xmm2;
                case gcc_dwarf_fpu_xmm3:  return fpu_xmm3;
                case gcc_dwarf_fpu_xmm4:  return fpu_xmm4;
                case gcc_dwarf_fpu_xmm5:  return fpu_xmm5;
                case gcc_dwarf_fpu_xmm6:  return fpu_xmm6;
                case gcc_dwarf_fpu_xmm7:  return fpu_xmm7;
                case gcc_dwarf_fpu_xmm8:  return fpu_xmm8;
                case gcc_dwarf_fpu_xmm9:  return fpu_xmm9;
                case gcc_dwarf_fpu_xmm10: return fpu_xmm10;
                case gcc_dwarf_fpu_xmm11: return fpu_xmm11;
                case gcc_dwarf_fpu_xmm12: return fpu_xmm12;
                case gcc_dwarf_fpu_xmm13: return fpu_xmm13;
                case gcc_dwarf_fpu_xmm14: return fpu_xmm14;
                case gcc_dwarf_fpu_xmm15: return fpu_xmm15;
                case gcc_dwarf_fpu_stmm0: return fpu_stmm0;
                case gcc_dwarf_fpu_stmm1: return fpu_stmm1;
                case gcc_dwarf_fpu_stmm2: return fpu_stmm2;
                case gcc_dwarf_fpu_stmm3: return fpu_stmm3;
                case gcc_dwarf_fpu_stmm4: return fpu_stmm4;
                case gcc_dwarf_fpu_stmm5: return fpu_stmm5;
                case gcc_dwarf_fpu_stmm6: return fpu_stmm6;
                case gcc_dwarf_fpu_stmm7: return fpu_stmm7;
                case gcc_dwarf_fpu_ymm0:  return fpu_ymm0;
                case gcc_dwarf_fpu_ymm1:  return fpu_ymm1;
                case gcc_dwarf_fpu_ymm2:  return fpu_ymm2;
                case gcc_dwarf_fpu_ymm3:  return fpu_ymm3;
                case gcc_dwarf_fpu_ymm4:  return fpu_ymm4;
                case gcc_dwarf_fpu_ymm5:  return fpu_ymm5;
                case gcc_dwarf_fpu_ymm6:  return fpu_ymm6;
                case gcc_dwarf_fpu_ymm7:  return fpu_ymm7;
                case gcc_dwarf_fpu_ymm8:  return fpu_ymm8;
                case gcc_dwarf_fpu_ymm9:  return fpu_ymm9;
                case gcc_dwarf_fpu_ymm10: return fpu_ymm10;
                case gcc_dwarf_fpu_ymm11: return fpu_ymm11;
                case gcc_dwarf_fpu_ymm12: return fpu_ymm12;
                case gcc_dwarf_fpu_ymm13: return fpu_ymm13;
                case gcc_dwarf_fpu_ymm14: return fpu_ymm14;
                case gcc_dwarf_fpu_ymm15: return fpu_ymm15;
                default:
                    return
LLDB_INVALID_REGNUM; 1137 } 1138 } 1139 1140 if (kind == eRegisterKindGDB) 1141 { 1142 switch (num) 1143 { 1144 case gdb_gpr_rax : return gpr_rax; 1145 case gdb_gpr_rbx : return gpr_rbx; 1146 case gdb_gpr_rcx : return gpr_rcx; 1147 case gdb_gpr_rdx : return gpr_rdx; 1148 case gdb_gpr_rsi : return gpr_rsi; 1149 case gdb_gpr_rdi : return gpr_rdi; 1150 case gdb_gpr_rbp : return gpr_rbp; 1151 case gdb_gpr_rsp : return gpr_rsp; 1152 case gdb_gpr_r8 : return gpr_r8; 1153 case gdb_gpr_r9 : return gpr_r9; 1154 case gdb_gpr_r10 : return gpr_r10; 1155 case gdb_gpr_r11 : return gpr_r11; 1156 case gdb_gpr_r12 : return gpr_r12; 1157 case gdb_gpr_r13 : return gpr_r13; 1158 case gdb_gpr_r14 : return gpr_r14; 1159 case gdb_gpr_r15 : return gpr_r15; 1160 case gdb_gpr_rip : return gpr_rip; 1161 case gdb_gpr_rflags : return gpr_rflags; 1162 case gdb_gpr_cs : return gpr_cs; 1163 case gdb_gpr_ss : return gpr_ss; 1164 case gdb_gpr_ds : return gpr_ds; 1165 case gdb_gpr_es : return gpr_es; 1166 case gdb_gpr_fs : return gpr_fs; 1167 case gdb_gpr_gs : return gpr_gs; 1168 case gdb_fpu_stmm0 : return fpu_stmm0; 1169 case gdb_fpu_stmm1 : return fpu_stmm1; 1170 case gdb_fpu_stmm2 : return fpu_stmm2; 1171 case gdb_fpu_stmm3 : return fpu_stmm3; 1172 case gdb_fpu_stmm4 : return fpu_stmm4; 1173 case gdb_fpu_stmm5 : return fpu_stmm5; 1174 case gdb_fpu_stmm6 : return fpu_stmm6; 1175 case gdb_fpu_stmm7 : return fpu_stmm7; 1176 case gdb_fpu_fcw : return fpu_fcw; 1177 case gdb_fpu_fsw : return fpu_fsw; 1178 case gdb_fpu_ftw : return fpu_ftw; 1179 case gdb_fpu_cs_64 : return fpu_cs; 1180 case gdb_fpu_ip : return fpu_ip; 1181 case gdb_fpu_ds_64 : return fpu_ds; 1182 case gdb_fpu_dp : return fpu_dp; 1183 case gdb_fpu_fop : return fpu_fop; 1184 case gdb_fpu_xmm0 : return fpu_xmm0; 1185 case gdb_fpu_xmm1 : return fpu_xmm1; 1186 case gdb_fpu_xmm2 : return fpu_xmm2; 1187 case gdb_fpu_xmm3 : return fpu_xmm3; 1188 case gdb_fpu_xmm4 : return fpu_xmm4; 1189 case gdb_fpu_xmm5 : return fpu_xmm5; 1190 case 
gdb_fpu_xmm6 : return fpu_xmm6; 1191 case gdb_fpu_xmm7 : return fpu_xmm7; 1192 case gdb_fpu_xmm8 : return fpu_xmm8; 1193 case gdb_fpu_xmm9 : return fpu_xmm9; 1194 case gdb_fpu_xmm10 : return fpu_xmm10; 1195 case gdb_fpu_xmm11 : return fpu_xmm11; 1196 case gdb_fpu_xmm12 : return fpu_xmm12; 1197 case gdb_fpu_xmm13 : return fpu_xmm13; 1198 case gdb_fpu_xmm14 : return fpu_xmm14; 1199 case gdb_fpu_xmm15 : return fpu_xmm15; 1200 case gdb_fpu_mxcsr : return fpu_mxcsr; 1201 case gdb_fpu_ymm0 : return fpu_ymm0; 1202 case gdb_fpu_ymm1 : return fpu_ymm1; 1203 case gdb_fpu_ymm2 : return fpu_ymm2; 1204 case gdb_fpu_ymm3 : return fpu_ymm3; 1205 case gdb_fpu_ymm4 : return fpu_ymm4; 1206 case gdb_fpu_ymm5 : return fpu_ymm5; 1207 case gdb_fpu_ymm6 : return fpu_ymm6; 1208 case gdb_fpu_ymm7 : return fpu_ymm7; 1209 case gdb_fpu_ymm8 : return fpu_ymm8; 1210 case gdb_fpu_ymm9 : return fpu_ymm9; 1211 case gdb_fpu_ymm10 : return fpu_ymm10; 1212 case gdb_fpu_ymm11 : return fpu_ymm11; 1213 case gdb_fpu_ymm12 : return fpu_ymm12; 1214 case gdb_fpu_ymm13 : return fpu_ymm13; 1215 case gdb_fpu_ymm14 : return fpu_ymm14; 1216 case gdb_fpu_ymm15 : return fpu_ymm15; 1217 default: 1218 return LLDB_INVALID_REGNUM; 1219 } 1220 } 1221 else if (kind == eRegisterKindLLDB) 1222 { 1223 return num; 1224 } 1225 } 1226 } 1227 } 1228 1229 return LLDB_INVALID_REGNUM; 1230} 1231 1232uint32_t 1233RegisterContext_x86_64::NumSupportedHardwareWatchpoints() 1234{ 1235 // Available debug address registers: dr0, dr1, dr2, dr3 1236 return 4; 1237} 1238 1239bool 1240RegisterContext_x86_64::IsWatchpointVacant(uint32_t hw_index) 1241{ 1242 bool is_vacant = false; 1243 RegisterValue value; 1244 1245 if (ReadRegister(dr7, value)) 1246 { 1247 uint64_t val = value.GetAsUInt64(); 1248 is_vacant = (val & (3 << 2*hw_index)) == 0; 1249 } 1250 1251 return is_vacant; 1252} 1253 1254static uint32_t 1255size_and_rw_bits(size_t size, bool read, bool write) 1256{ 1257 uint32_t rw; 1258 if (read) { 1259 rw = 0x3; // READ or READ/WRITE 
1260 } else if (write) { 1261 rw = 0x1; // WRITE 1262 } else { 1263 assert(0 && "read and write cannot both be false"); 1264 } 1265 1266 switch (size) { 1267 case 1: 1268 return rw; 1269 case 2: 1270 return (0x1 << 2) | rw; 1271 case 4: 1272 return (0x3 << 2) | rw; 1273 case 8: 1274 return (0x2 << 2) | rw; 1275 default: 1276 assert(0 && "invalid size, must be one of 1, 2, 4, or 8"); 1277 } 1278} 1279 1280uint32_t 1281RegisterContext_x86_64::SetHardwareWatchpoint(addr_t addr, size_t size, 1282 bool read, bool write) 1283{ 1284 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 1285 1286 if (num_hw_watchpoints == 0) 1287 return LLDB_INVALID_INDEX32; 1288 1289 if (!(size == 1 || size == 2 || size == 4 || size == 8)) 1290 return LLDB_INVALID_INDEX32; 1291 1292 if (read == false && write == false) 1293 return LLDB_INVALID_INDEX32; 1294 1295 uint32_t hw_index = 0; 1296 for (hw_index = 0; hw_index < num_hw_watchpoints; ++hw_index) 1297 { 1298 if (IsWatchpointVacant(hw_index)) 1299 break; 1300 } 1301 1302 // Set both dr7 (debug control register) and dri (debug address register). 1303 1304 // dr7{7-0} encodes the local/gloabl enable bits: 1305 // global enable --. 
.-- local enable 1306 // | | 1307 // v v 1308 // dr0 -> bits{1-0} 1309 // dr1 -> bits{3-2} 1310 // dr2 -> bits{5-4} 1311 // dr3 -> bits{7-6} 1312 // 1313 // dr7{31-16} encodes the rw/len bits: 1314 // b_x+3, b_x+2, b_x+1, b_x 1315 // where bits{x+1, x} => rw 1316 // 0b00: execute, 0b01: write, 0b11: read-or-write, 1317 // 0b10: io read-or-write (unused) 1318 // and bits{x+3, x+2} => len 1319 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte 1320 // 1321 // dr0 -> bits{19-16} 1322 // dr1 -> bits{23-20} 1323 // dr2 -> bits{27-24} 1324 // dr3 -> bits{31-28} 1325 if (hw_index < num_hw_watchpoints) 1326 { 1327 RegisterValue current_dr7_bits; 1328 1329 if (ReadRegister(dr7, current_dr7_bits)) 1330 { 1331 uint64_t new_dr7_bits = current_dr7_bits.GetAsUInt64() | 1332 (1 << (2*hw_index) | 1333 size_and_rw_bits(size, read, write) << 1334 (16+4*hw_index)); 1335 1336 if (WriteRegister(dr0 + hw_index, RegisterValue(addr)) && 1337 WriteRegister(dr7, RegisterValue(new_dr7_bits))) 1338 return hw_index; 1339 } 1340 } 1341 1342 return LLDB_INVALID_INDEX32; 1343} 1344 1345bool 1346RegisterContext_x86_64::ClearHardwareWatchpoint(uint32_t hw_index) 1347{ 1348 if (hw_index < NumSupportedHardwareWatchpoints()) 1349 { 1350 RegisterValue current_dr7_bits; 1351 1352 if (ReadRegister(dr7, current_dr7_bits)) 1353 { 1354 uint64_t new_dr7_bits = current_dr7_bits.GetAsUInt64() & ~(3 << (2*hw_index)); 1355 1356 if (WriteRegister(dr7, RegisterValue(new_dr7_bits))) 1357 return true; 1358 } 1359 } 1360 1361 return false; 1362} 1363 1364bool 1365RegisterContext_x86_64::IsWatchpointHit(uint32_t hw_index) 1366{ 1367 bool is_hit = false; 1368 1369 if (hw_index < NumSupportedHardwareWatchpoints()) 1370 { 1371 RegisterValue value; 1372 1373 if (ReadRegister(dr6, value)) 1374 { 1375 uint64_t val = value.GetAsUInt64(); 1376 is_hit = val & (1 << hw_index); 1377 } 1378 } 1379 1380 return is_hit; 1381} 1382 1383addr_t 1384RegisterContext_x86_64::GetWatchpointAddress(uint32_t hw_index) 1385{ 1386 addr_t 
wp_monitor_addr = LLDB_INVALID_ADDRESS; 1387 1388 if (hw_index < NumSupportedHardwareWatchpoints()) 1389 { 1390 if (!IsWatchpointVacant(hw_index)) 1391 { 1392 RegisterValue value; 1393 1394 if (ReadRegister(dr0 + hw_index, value)) 1395 wp_monitor_addr = value.GetAsUInt64(); 1396 } 1397 } 1398 1399 return wp_monitor_addr; 1400} 1401 1402 1403bool 1404RegisterContext_x86_64::ClearWatchpointHits() 1405{ 1406 return WriteRegister(dr6, RegisterValue((uint64_t)0)); 1407} 1408 1409bool 1410RegisterContext_x86_64::HardwareSingleStep(bool enable) 1411{ 1412 enum { TRACE_BIT = 0x100 }; 1413 uint64_t rflags; 1414 1415 if ((rflags = ReadRegisterAsUnsigned(gpr_rflags, -1UL)) == -1UL) 1416 return false; 1417 1418 if (enable) 1419 { 1420 if (rflags & TRACE_BIT) 1421 return true; 1422 1423 rflags |= TRACE_BIT; 1424 } 1425 else 1426 { 1427 if (!(rflags & TRACE_BIT)) 1428 return false; 1429 1430 rflags &= ~TRACE_BIT; 1431 } 1432 1433 return WriteRegisterFromUnsigned(gpr_rflags, rflags); 1434} 1435 1436bool 1437RegisterContext_x86_64::ReadGPR() 1438{ 1439 ProcessMonitor &monitor = GetMonitor(); 1440 return monitor.ReadGPR(m_thread.GetID(), &m_gpr, GetGPRSize()); 1441} 1442 1443bool 1444RegisterContext_x86_64::ReadFPR() 1445{ 1446 ProcessMonitor &monitor = GetMonitor(); 1447 if (m_fpr_type == eFXSAVE) 1448 return monitor.ReadFPR(m_thread.GetID(), &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave)); 1449 1450 if (m_fpr_type == eXSAVE) 1451 return monitor.ReadRegisterSet(m_thread.GetID(), &m_iovec, sizeof(m_fpr.xstate.xsave), NT_X86_XSTATE); 1452 return false; 1453} 1454 1455bool 1456RegisterContext_x86_64::WriteGPR() 1457{ 1458 ProcessMonitor &monitor = GetMonitor(); 1459 return monitor.WriteGPR(m_thread.GetID(), &m_gpr, GetGPRSize()); 1460} 1461 1462bool 1463RegisterContext_x86_64::WriteFPR() 1464{ 1465 ProcessMonitor &monitor = GetMonitor(); 1466 if (m_fpr_type == eFXSAVE) 1467 return monitor.WriteFPR(m_thread.GetID(), &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave)); 1468 1469 if 
(m_fpr_type == eXSAVE) 1470 return monitor.WriteRegisterSet(m_thread.GetID(), &m_iovec, sizeof(m_fpr.xstate.xsave), NT_X86_XSTATE); 1471 return false; 1472} 1473 1474