DNBArchImplX86_64.cpp revision 6e0101c86555a06b3bd4cb6104b35abfae0b0057
1//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  Created by Greg Clayton on 6/25/07.
11//
12//===----------------------------------------------------------------------===//
13
14#if defined (__i386__) || defined (__x86_64__)
15
16#include <sys/cdefs.h>
17
18#include "MacOSX/x86_64/DNBArchImplX86_64.h"
19#include "DNBLog.h"
20#include "MachThread.h"
21#include "MachProcess.h"
22#include <mach/mach.h>
23#include <stdlib.h>
24
25#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
26enum debugState {
27    debugStateUnknown,
28    debugStateOff,
29    debugStateOn
30};
31
32static debugState sFPUDebugState = debugStateUnknown;
33static debugState sAVXForceState = debugStateUnknown;
34
35static bool DebugFPURegs ()
36{
37    if (sFPUDebugState == debugStateUnknown)
38    {
39        if (getenv("DNB_DEBUG_FPU_REGS"))
40            sFPUDebugState = debugStateOn;
41        else
42            sFPUDebugState = debugStateOff;
43    }
44
45    return (sFPUDebugState == debugStateOn);
46}
47
48static bool ForceAVXRegs ()
49{
50    if (sAVXForceState == debugStateUnknown)
51    {
52        if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
53            sAVXForceState = debugStateOn;
54        else
55            sAVXForceState = debugStateOff;
56    }
57
58    return (sAVXForceState == debugStateOn);
59}
60
61#define DEBUG_FPU_REGS (DebugFPURegs())
62#define FORCE_AVX_REGS (ForceAVXRegs())
63#else
64#define DEBUG_FPU_REGS (0)
65#define FORCE_AVX_REGS (0)
66#endif
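// Illustrative usage: when debugserver is built with LLDB_DEBUGSERVER_RELEASE or
// LLDB_DEBUGSERVER_DEBUG defined, the two macros above can be flipped at run time
// through the environment, e.g. (hypothetical invocation):
//   DNB_DEBUG_FPU_REGS=1 DNB_DEBUG_X86_FORCE_AVX_REGS=1 ./debugserver ...
// Any value works (even an empty one) since only a non-NULL getenv() result is
// checked; in all other builds both macros are hard-coded to 0.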
67
68enum DNBArchImplX86_64::AVXPresence DNBArchImplX86_64::s_has_avx = DNBArchImplX86_64::kAVXNotPresent;
69
70uint64_t
71DNBArchImplX86_64::GetPC(uint64_t failValue)
72{
73    // Get program counter
74    if (GetGPRState(false) == KERN_SUCCESS)
75        return m_state.context.gpr.__rip;
76    return failValue;
77}
78
79kern_return_t
80DNBArchImplX86_64::SetPC(uint64_t value)
81{
82    // Get program counter
83    kern_return_t err = GetGPRState(false);
84    if (err == KERN_SUCCESS)
85    {
86        m_state.context.gpr.__rip = value;
87        err = SetGPRState();
88    }
89    return err;
90}
91
92uint64_t
93DNBArchImplX86_64::GetSP(uint64_t failValue)
94{
95    // Get stack pointer
96    if (GetGPRState(false) == KERN_SUCCESS)
97        return m_state.context.gpr.__rsp;
98    return failValue;
99}
100
101// Uncomment the value below to verify the values in the debugger.
102//#define DEBUG_GPR_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
103
104kern_return_t
105DNBArchImplX86_64::GetGPRState(bool force)
106{
107    if (force || m_state.GetError(e_regSetGPR, Read))
108    {
109        kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
110        DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
111
112#if DEBUG_GPR_VALUES
113        m_state.context.gpr.__rax = ('a' << 8) + 'x';
114        m_state.context.gpr.__rbx = ('b' << 8) + 'x';
115        m_state.context.gpr.__rcx = ('c' << 8) + 'x';
116        m_state.context.gpr.__rdx = ('d' << 8) + 'x';
117        m_state.context.gpr.__rdi = ('d' << 8) + 'i';
118        m_state.context.gpr.__rsi = ('s' << 8) + 'i';
119        m_state.context.gpr.__rbp = ('b' << 8) + 'p';
120        m_state.context.gpr.__rsp = ('s' << 8) + 'p';
121        m_state.context.gpr.__r8  = ('r' << 8) + '8';
122        m_state.context.gpr.__r9  = ('r' << 8) + '9';
123        m_state.context.gpr.__r10 = ('r' << 8) + 'a';
124        m_state.context.gpr.__r11 = ('r' << 8) + 'b';
125        m_state.context.gpr.__r12 = ('r' << 8) + 'c';
126        m_state.context.gpr.__r13 = ('r' << 8) + 'd';
127        m_state.context.gpr.__r14 = ('r' << 8) + 'e';
128        m_state.context.gpr.__r15 = ('r' << 8) + 'f';
129        m_state.context.gpr.__rip = ('i' << 8) + 'p';
130        m_state.context.gpr.__rflags = ('f' << 8) + 'l';
131        m_state.context.gpr.__cs = ('c' << 8) + 's';
132        m_state.context.gpr.__fs = ('f' << 8) + 's';
133        m_state.context.gpr.__gs = ('g' << 8) + 's';
134        m_state.SetError(e_regSetGPR, Read, 0);
135#else
136        mach_msg_type_number_t count = e_regSetWordSizeGPR;
137        m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
138        DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
139                          "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
140                          "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
141                          "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
142                          "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
143                          "\n\trip = %16.16llx"
144                          "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
145                          m_thread->ThreadID(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT,
146                          m_state.GetError(e_regSetGPR, Read),
147                          m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
148                          m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
149                          m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
150                          m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
151                          m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
152                          m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
153                          m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);
154
155        //      DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
156        //                        "\n\trax = %16.16llx"
157        //                        "\n\trbx = %16.16llx"
158        //                        "\n\trcx = %16.16llx"
159        //                        "\n\trdx = %16.16llx"
160        //                        "\n\trdi = %16.16llx"
161        //                        "\n\trsi = %16.16llx"
162        //                        "\n\trbp = %16.16llx"
163        //                        "\n\trsp = %16.16llx"
164        //                        "\n\t r8 = %16.16llx"
165        //                        "\n\t r9 = %16.16llx"
166        //                        "\n\tr10 = %16.16llx"
167        //                        "\n\tr11 = %16.16llx"
168        //                        "\n\tr12 = %16.16llx"
169        //                        "\n\tr13 = %16.16llx"
170        //                        "\n\tr14 = %16.16llx"
171        //                        "\n\tr15 = %16.16llx"
172        //                        "\n\trip = %16.16llx"
173        //                        "\n\tflg = %16.16llx"
174        //                        "\n\t cs = %16.16llx"
175        //                        "\n\t fs = %16.16llx"
176        //                        "\n\t gs = %16.16llx",
177        //                        m_thread->ThreadID(),
178        //                        x86_THREAD_STATE64,
179        //                        x86_THREAD_STATE64_COUNT,
180        //                        m_state.GetError(e_regSetGPR, Read),
181        //                        m_state.context.gpr.__rax,
182        //                        m_state.context.gpr.__rbx,
183        //                        m_state.context.gpr.__rcx,
184        //                        m_state.context.gpr.__rdx,
185        //                        m_state.context.gpr.__rdi,
186        //                        m_state.context.gpr.__rsi,
187        //                        m_state.context.gpr.__rbp,
188        //                        m_state.context.gpr.__rsp,
189        //                        m_state.context.gpr.__r8,
190        //                        m_state.context.gpr.__r9,
191        //                        m_state.context.gpr.__r10,
192        //                        m_state.context.gpr.__r11,
193        //                        m_state.context.gpr.__r12,
194        //                        m_state.context.gpr.__r13,
195        //                        m_state.context.gpr.__r14,
196        //                        m_state.context.gpr.__r15,
197        //                        m_state.context.gpr.__rip,
198        //                        m_state.context.gpr.__rflags,
199        //                        m_state.context.gpr.__cs,
200        //                        m_state.context.gpr.__fs,
201        //                        m_state.context.gpr.__gs);
202#endif
203    }
204    return m_state.GetError(e_regSetGPR, Read);
205}
206
207// Uncomment the value below to verify the values in the debugger.
208//#define DEBUG_FPU_REGS 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
209
210kern_return_t
211DNBArchImplX86_64::GetFPUState(bool force)
212{
213    if (force || m_state.GetError(e_regSetFPU, Read))
214    {
215        if (DEBUG_FPU_REGS) {
216            if (CPUHasAVX() || FORCE_AVX_REGS)
217            {
218                m_state.context.fpu.avx.__fpu_reserved[0] = -1;
219                m_state.context.fpu.avx.__fpu_reserved[1] = -1;
220                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
221                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
222                m_state.context.fpu.avx.__fpu_ftw = 1;
223                m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
224                m_state.context.fpu.avx.__fpu_fop = 2;
225                m_state.context.fpu.avx.__fpu_ip = 3;
226                m_state.context.fpu.avx.__fpu_cs = 4;
227                m_state.context.fpu.avx.__fpu_rsrv2 = 5;
228                m_state.context.fpu.avx.__fpu_dp = 6;
229                m_state.context.fpu.avx.__fpu_ds = 7;
230                m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
231                m_state.context.fpu.avx.__fpu_mxcsr = 8;
232                m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
233                int i;
234                for (i=0; i<16; ++i)
235                {
236                    if (i<10)
237                    {
238                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
239                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
240                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
241                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
242                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
243                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
244                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
245                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
246                    }
247                    else
248                    {
249                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
250                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
251                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
252                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
253                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
254                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
255                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
256                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
257                    }
258
259                    m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
260                    m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
261                    m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
262                    m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
263                    m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
264                    m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
265                    m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
266                    m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
267                    m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8';
268                    m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9';
269                    m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A';
270                    m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B';
271                    m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C';
272                    m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D';
273                    m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E';
274                    m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F';
275
276                    m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
277                    m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
278                    m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
279                    m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
280                    m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
281                    m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
282                    m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
283                    m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
284                    m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8';
285                    m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9';
286                    m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A';
287                    m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B';
288                    m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C';
289                    m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D';
290                    m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E';
291                    m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F';
292                }
293                for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
294                    m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
295                m_state.context.fpu.avx.__fpu_reserved1 = -1;
296                for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
297                    m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
298                m_state.SetError(e_regSetFPU, Read, 0);
299            }
300            else
301            {
302                m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
303                m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
304                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
305                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
306                m_state.context.fpu.no_avx.__fpu_ftw = 1;
307                m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
308                m_state.context.fpu.no_avx.__fpu_fop = 2;
309                m_state.context.fpu.no_avx.__fpu_ip = 3;
310                m_state.context.fpu.no_avx.__fpu_cs = 4;
311                m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
312                m_state.context.fpu.no_avx.__fpu_dp = 6;
313                m_state.context.fpu.no_avx.__fpu_ds = 7;
314                m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
315                m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
316                m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
317                int i;
318                for (i=0; i<16; ++i)
319                {
320                    if (i<10)
321                    {
322                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
323                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
324                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
325                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
326                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
327                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
328                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
329                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
330                    }
331                    else
332                    {
333                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
334                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
335                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
336                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
337                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
338                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
339                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
340                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
341                    }
342
343                    m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
344                    m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
345                    m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
346                    m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
347                    m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
348                    m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
349                    m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
350                    m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
351                    m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
352                    m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
353                    m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
354                    m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
355                    m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
356                    m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
357                    m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
358                    m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
359                }
360                for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
361                    m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
362                m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
363                m_state.SetError(e_regSetFPU, Read, 0);
364            }
365        }
366        else
367        {
368            if (CPUHasAVX() || FORCE_AVX_REGS)
369            {
370                mach_msg_type_number_t count = e_regSetWordSizeAVX;
371                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
372                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x",
373                                  m_thread->ThreadID(), x86_AVX_STATE64, (uint32_t)count,
374                                  e_regSetWordSizeAVX, m_state.GetError(e_regSetFPU, Read));
375            }
376            else
377            {
378                mach_msg_type_number_t count = e_regSetWordSizeFPU;
379                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
380                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in)) => 0x%8.8x",
381                                  m_thread->ThreadID(), __x86_64_FLOAT_STATE, (uint32_t)count,
382                                  e_regSetWordSizeFPU, m_state.GetError(e_regSetFPU, Read));
383            }
384        }
385    }
386    return m_state.GetError(e_regSetFPU, Read);
387}
388
389kern_return_t
390DNBArchImplX86_64::GetEXCState(bool force)
391{
392    if (force || m_state.GetError(e_regSetEXC, Read))
393    {
394        mach_msg_type_number_t count = e_regSetWordSizeEXC;
395        m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
396    }
397    return m_state.GetError(e_regSetEXC, Read);
398}
399
400kern_return_t
401DNBArchImplX86_64::SetGPRState()
402{
403    kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
404    DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
405
406    m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
407    DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
408                      "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
409                      "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
410                      "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
411                      "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
412                      "\n\trip = %16.16llx"
413                      "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
414                      m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
415                      m_state.GetError(e_regSetGPR, Write),
416                      m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
417                      m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
418                      m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
419                      m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
420                      m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
421                      m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
422                      m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
423    return m_state.GetError(e_regSetGPR, Write);
424}
425
426kern_return_t
427DNBArchImplX86_64::SetFPUState()
428{
429    if (DEBUG_FPU_REGS)
430    {
431        m_state.SetError(e_regSetFPU, Write, 0);
432        return m_state.GetError(e_regSetFPU, Write);
433    }
434    else
435    {
436        if (CPUHasAVX() || FORCE_AVX_REGS)
437        {
438            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
439            return m_state.GetError(e_regSetFPU, Write);
440        }
441        else
442        {
443            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
444            return m_state.GetError(e_regSetFPU, Write);
445        }
446    }
447}
448
449kern_return_t
450DNBArchImplX86_64::SetEXCState()
451{
452    m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
453    return m_state.GetError(e_regSetEXC, Write);
454}
455
456kern_return_t
457DNBArchImplX86_64::GetDBGState(bool force)
458{
459    if (force || m_state.GetError(e_regSetDBG, Read))
460    {
461        mach_msg_type_number_t count = e_regSetWordSizeDBG;
462        m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
463    }
464    return m_state.GetError(e_regSetDBG, Read);
465}
466
467kern_return_t
468DNBArchImplX86_64::SetDBGState()
469{
470    m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
471    return m_state.GetError(e_regSetDBG, Write);
472}
473
474void
475DNBArchImplX86_64::ThreadWillResume()
476{
477    // Do we need to step this thread? If so, let the mach thread tell us so.
478    if (m_thread->IsStepping())
479    {
480        // This is the primary thread, let the arch do anything it needs
481        EnableHardwareSingleStep(true);
482    }
483
484    // Reset the debug status register, if necessary, before we resume.
485    kern_return_t kret = GetDBGState(false);
486    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
487    if (kret != KERN_SUCCESS)
488        return;
489
490    DBG &debug_state = m_state.context.dbg;
491    bool need_reset = false;
492    uint32_t i, num = NumSupportedHardwareWatchpoints();
493    for (i = 0; i < num; ++i)
494        if (IsWatchpointHit(debug_state, i))
495            need_reset = true;
496
497    if (need_reset)
498    {
499        ClearWatchpointHits(debug_state);
500        kret = SetDBGState();
501        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
502    }
503}
504
505bool
506DNBArchImplX86_64::ThreadDidStop()
507{
508    bool success = true;
509
510    m_state.InvalidateAllRegisterStates();
511
512    // Are we stepping a single instruction?
513    if (GetGPRState(true) == KERN_SUCCESS)
514    {
515        // We are single stepping, was this the primary thread?
516        if (m_thread->IsStepping())
517        {
518            // This was the primary thread, we need to clear the trace
519            // bit if so.
520            success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
521        }
522        else
523        {
524            // The MachThread will automatically restore the suspend count
525            // in ThreadDidStop(), so we don't need to do anything here if
526            // we weren't the primary thread the last time
527        }
528    }
529    return success;
530}
531
532bool
533DNBArchImplX86_64::NotifyException(MachException::Data& exc)
534{
535    switch (exc.exc_type)
536    {
537        case EXC_BAD_ACCESS:
538            break;
539        case EXC_BAD_INSTRUCTION:
540            break;
541        case EXC_ARITHMETIC:
542            break;
543        case EXC_EMULATION:
544            break;
545        case EXC_SOFTWARE:
546            break;
547        case EXC_BREAKPOINT:
548            if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
549            {
550                // exc_code = EXC_I386_BPT
551                //
552                nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
553                if (pc != INVALID_NUB_ADDRESS && pc > 0)
554                {
555                    pc -= 1;
556                    // Check for a breakpoint at one byte prior to the current PC value
557                    // since the PC will be just past the trap.
558
559                    nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc);
560                    if (NUB_BREAK_ID_IS_VALID(breakID))
561                    {
562                        // Back up the PC since the trap was taken and the PC
563                        // is at the address following the single-byte trap instruction.
564                        if (m_state.context.gpr.__rip > 0)
565                        {
566                            m_state.context.gpr.__rip = pc;
567                            // Write the new PC back out
568                            SetGPRState ();
569                        }
570                    }
571                    return true;
572                }
573            }
574            else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
575            {
576                // exc_code = EXC_I386_SGL
577                //
578                // Check whether this corresponds to a watchpoint hit event.
579                // If yes, set the exc_sub_code to the data break address.
580                nub_addr_t addr = 0;
581                uint32_t hw_index = GetHardwareWatchpointHit(addr);
582                if (hw_index != INVALID_NUB_HW_INDEX)
583                {
584                    exc.exc_data[1] = addr;
585                    // Piggyback the hw_index in the exc.data.
586                    exc.exc_data.push_back(hw_index);
587                }
588
589                return true;
590            }
591            break;
592        case EXC_SYSCALL:
593            break;
594        case EXC_MACH_SYSCALL:
595            break;
596        case EXC_RPC_ALERT:
597            break;
598    }
599    return false;
600}
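// Illustrative summary of the EXC_BREAKPOINT sub-codes handled above:
//   exc_data[0] == 2 (EXC_I386_BPT): software breakpoint trap; if a known
//       breakpoint sits one byte behind the reported PC, the PC is rolled back
//       to point at the trap instruction and written back with SetGPRState().
//   exc_data[0] == 1 (EXC_I386_SGL): single-step/debug exception; if a hardware
//       watchpoint fired, exc_data[1] is rewritten to the watched address and
//       the hardware slot index is appended to exc_data.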
601
602uint32_t
603DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
604{
605    // Available debug address registers: dr0, dr1, dr2, dr3.
606    return 4;
607}
608
609static uint32_t
610size_and_rw_bits(nub_size_t size, bool read, bool write)
611{
612    uint32_t rw;
613    if (read) {
614        rw = 0x3; // READ or READ/WRITE
615    } else if (write) {
616        rw = 0x1; // WRITE
617    } else {
618        assert(0 && "read and write cannot both be false"); rw = 0; // keep rw defined if asserts are compiled out
619    }
620
621    switch (size) {
622    case 1:
623        return rw;
624    case 2:
625        return (0x1 << 2) | rw;
626    case 4:
627        return (0x3 << 2) | rw;
628    case 8:
629        return (0x2 << 2) | rw;
630    default:
631        assert(0 && "invalid size, must be one of 1, 2, 4, or 8"); return 0; // unreachable unless asserts are compiled out
632    }
633}
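// Worked example (illustrative): a 4-byte read-or-write watchpoint encodes as
// size_and_rw_bits(4, true, true) == (0x3 << 2) | 0x3 == 0xF, i.e. len = 0b11
// (4 bytes) in the upper two bits and rw = 0b11 (read-or-write) in the lower two.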
634void
635DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
636{
637    // Set both dr7 (debug control register) and dri (debug address register).
638
639    // dr7{7-0} encodes the local/global enable bits:
640    //  global enable --. .-- local enable
641    //                  | |
642    //                  v v
643    //      dr0 -> bits{1-0}
644    //      dr1 -> bits{3-2}
645    //      dr2 -> bits{5-4}
646    //      dr3 -> bits{7-6}
647    //
648    // dr7{31-16} encodes the rw/len bits:
649    //  b_x+3, b_x+2, b_x+1, b_x
650    //      where bits{x+1, x} => rw
651    //            0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
652    //      and bits{x+3, x+2} => len
653    //            0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
654    //
655    //      dr0 -> bits{19-16}
656    //      dr1 -> bits{23-20}
657    //      dr2 -> bits{27-24}
658    //      dr3 -> bits{31-28}
659    debug_state.__dr7 |= (1 << (2*hw_index) |
660                          size_and_rw_bits(size, read, write) << (16+4*hw_index));
661    switch (hw_index) {
662    case 0:
663        debug_state.__dr0 = addr; break;
664    case 1:
665        debug_state.__dr1 = addr; break;
666    case 2:
667        debug_state.__dr2 = addr; break;
668    case 3:
669        debug_state.__dr3 = addr; break;
670    default:
671        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
672    }
673    return;
674}
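// Illustrative example: installing that 4-byte read-or-write watchpoint in
// hardware slot 1 at some address A amounts to
//   debug_state.__dr7 |= (1 << 2) | (0xF << 20);  // local enable for dr1 + rw/len field
//   debug_state.__dr1  = A;                        // A is a hypothetical watched address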
675
676void
677DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
678{
679    debug_state.__dr7 &= ~(3 << (2*hw_index));
680    switch (hw_index) {
681    case 0:
682        debug_state.__dr0 = 0; break;
683    case 1:
684        debug_state.__dr1 = 0; break;
685    case 2:
686        debug_state.__dr2 = 0; break;
687    case 3:
688        debug_state.__dr3 = 0; break;
689    default:
690        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
691    }
692    return;
693}
694
695bool
696DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
697{
698    // Check dr7 (debug control register) for local/global enable bits:
699    //  global enable --. .-- local enable
700    //                  | |
701    //                  v v
702    //      dr0 -> bits{1-0}
703    //      dr1 -> bits{3-2}
704    //      dr2 -> bits{5-4}
705    //      dr3 -> bits{7-6}
706    return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
707}
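// For example, slot 2 is vacant when dr7 bits {5-4} are both clear, i.e.
// (debug_state.__dr7 & (3 << 4)) == 0.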
708
709// Resets the local copy of the debug status register to wait for the next debug exception.
710void
711DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state)
712{
713    // See also IsWatchpointHit().
714    debug_state.__dr6 = 0;
715    return;
716}
717
718bool
719DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
720{
721    // Check dr6 (debug status register) whether a watchpoint hits:
722    //          is watchpoint hit?
723    //                  |
724    //                  v
725    //      dr0 -> bits{0}
726    //      dr1 -> bits{1}
727    //      dr2 -> bits{2}
728    //      dr3 -> bits{3}
729    return (debug_state.__dr6 & (1 << hw_index));
730}
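// For example, a hit on the watchpoint in dr1 sets dr6 bit 1, so
// (debug_state.__dr6 & (1 << 1)) is non-zero; ClearWatchpointHits() above then
// zeroes dr6 so the next debug exception starts from a clean status register.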
731
732nub_addr_t
733DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
734{
735    switch (hw_index) {
736    case 0:
737        return debug_state.__dr0;
738    case 1:
739        return debug_state.__dr1;
740    case 2:
741        return debug_state.__dr2;
742    case 3:
743        return debug_state.__dr3;
744    default:
745        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); return 0; // unreachable unless asserts are compiled out
746    }
747}
748
749uint32_t
750DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write)
751{
752    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = %8.8p, size = %u, read = %u, write = %u)", addr, size, read, write);
753
754    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
755
756    // Can only watch 1, 2, 4, or 8 bytes.
757    if (!(size == 1 || size == 2 || size == 4 || size == 8))
758        return INVALID_NUB_HW_INDEX;
759
760    // We must watch for either read or write
761    if (read == false && write == false)
762        return INVALID_NUB_HW_INDEX;
763
764    // Read the debug state
765    kern_return_t kret = GetDBGState(false);
766
767    if (kret == KERN_SUCCESS)
768    {
769        // Check to make sure we have the needed hardware support
770        uint32_t i = 0;
771
772        DBG &debug_state = m_state.context.dbg;
773        for (i = 0; i < num_hw_watchpoints; ++i)
774        {
775            if (IsWatchpointVacant(debug_state, i))
776                break;
777        }
778
779        // See if we found an available hw breakpoint slot above
780        if (i < num_hw_watchpoints)
781        {
782            // Modify our local copy of the debug state, first.
783            SetWatchpoint(debug_state, i, addr, size, read, write);
784            // Now set the watch point in the inferior.
785            kret = SetDBGState();
786            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
787
788            if (kret == KERN_SUCCESS)
789                return i;
790        }
791        else
792        {
793            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
794        }
795    }
796    return INVALID_NUB_HW_INDEX;
797}
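// Typical flow (illustrative; `arch` is a hypothetical DNBArchImplX86_64 *):
//   uint32_t idx = arch->EnableHardwareWatchpoint(addr, 4, /*read*/ false, /*write*/ true);
//   if (idx != INVALID_NUB_HW_INDEX)
//   {
//       // ... resume; on an EXC_I386_SGL exception GetHardwareWatchpointHit()
//       // reports which slot (and address) fired ...
//       arch->DisableHardwareWatchpoint(idx);
//   }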
798
799bool
800DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index)
801{
802    kern_return_t kret = GetDBGState(false);
803
804    const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
805    if (kret == KERN_SUCCESS)
806    {
807        DBG &debug_state = m_state.context.dbg;
808        if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
809        {
810            // Modify our local copy of the debug state, first.
811            ClearWatchpoint(debug_state, hw_index);
812            // Now disable the watch point in the inferior.
813            kret = SetDBGState();
814            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
815                             hw_index);
816
817            if (kret == KERN_SUCCESS)
818                return true;
819        }
820    }
821    return false;
822}
823
824// Iterate through the debug status register; return the index of the first hit.
825uint32_t
826DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr)
827{
828    // Read the debug state
829    kern_return_t kret = GetDBGState(true);
830    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
831    if (kret == KERN_SUCCESS)
832    {
833        DBG &debug_state = m_state.context.dbg;
834        uint32_t i, num = NumSupportedHardwareWatchpoints();
835        for (i = 0; i < num; ++i)
836        {
837            if (IsWatchpointHit(debug_state, i))
838            {
839                addr = GetWatchAddress(debug_state, i);
840                DNBLogThreadedIf(LOG_WATCHPOINTS,
841                                 "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u (addr = %8.8p).",
842                                 i, addr);
843                return i;
844            }
845        }
846    }
847    return INVALID_NUB_HW_INDEX;
848}
849
850// Set the single step bit in the processor status register.
851kern_return_t
852DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
853{
854    if (GetGPRState(false) == KERN_SUCCESS)
855    {
856        const uint32_t trace_bit = 0x100u;
857        if (enable)
858            m_state.context.gpr.__rflags |= trace_bit;
859        else
860            m_state.context.gpr.__rflags &= ~trace_bit;
861        return SetGPRState();
862    }
863    return m_state.GetError(e_regSetGPR, Read);
864}
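// The 0x100 constant above is the Trap Flag (TF, bit 8) of RFLAGS: with TF set
// the CPU raises a debug exception after executing one instruction, which is how
// single-instruction stepping is implemented here. Enabling and later disabling
// a step therefore reduces to
//   m_state.context.gpr.__rflags |= 0x100;   // step one instruction
//   m_state.context.gpr.__rflags &= ~0x100;  // resume normal execution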
865
866
867//----------------------------------------------------------------------
868// Register information definitions
869//----------------------------------------------------------------------
870
871enum
872{
873    gpr_rax = 0,
874    gpr_rbx,
875    gpr_rcx,
876    gpr_rdx,
877    gpr_rdi,
878    gpr_rsi,
879    gpr_rbp,
880    gpr_rsp,
881    gpr_r8,
882    gpr_r9,
883    gpr_r10,
884    gpr_r11,
885    gpr_r12,
886    gpr_r13,
887    gpr_r14,
888    gpr_r15,
889    gpr_rip,
890    gpr_rflags,
891    gpr_cs,
892    gpr_fs,
893    gpr_gs,
894    k_num_gpr_regs
895};
896
897enum {
898    fpu_fcw,
899    fpu_fsw,
900    fpu_ftw,
901    fpu_fop,
902    fpu_ip,
903    fpu_cs,
904    fpu_dp,
905    fpu_ds,
906    fpu_mxcsr,
907    fpu_mxcsrmask,
908    fpu_stmm0,
909    fpu_stmm1,
910    fpu_stmm2,
911    fpu_stmm3,
912    fpu_stmm4,
913    fpu_stmm5,
914    fpu_stmm6,
915    fpu_stmm7,
916    fpu_xmm0,
917    fpu_xmm1,
918    fpu_xmm2,
919    fpu_xmm3,
920    fpu_xmm4,
921    fpu_xmm5,
922    fpu_xmm6,
923    fpu_xmm7,
924    fpu_xmm8,
925    fpu_xmm9,
926    fpu_xmm10,
927    fpu_xmm11,
928    fpu_xmm12,
929    fpu_xmm13,
930    fpu_xmm14,
931    fpu_xmm15,
932    fpu_ymm0,
933    fpu_ymm1,
934    fpu_ymm2,
935    fpu_ymm3,
936    fpu_ymm4,
937    fpu_ymm5,
938    fpu_ymm6,
939    fpu_ymm7,
940    fpu_ymm8,
941    fpu_ymm9,
942    fpu_ymm10,
943    fpu_ymm11,
944    fpu_ymm12,
945    fpu_ymm13,
946    fpu_ymm14,
947    fpu_ymm15,
948    k_num_fpu_regs,
949
950    // Aliases
951    fpu_fctrl = fpu_fcw,
952    fpu_fstat = fpu_fsw,
953    fpu_ftag  = fpu_ftw,
954    fpu_fiseg = fpu_cs,
955    fpu_fioff = fpu_ip,
956    fpu_foseg = fpu_ds,
957    fpu_fooff = fpu_dp
958};
959
960enum {
961    exc_trapno,
962    exc_err,
963    exc_faultvaddr,
964    k_num_exc_regs,
965};
966
967
968enum gcc_dwarf_regnums
969{
970    gcc_dwarf_rax = 0,
971    gcc_dwarf_rdx = 1,
972    gcc_dwarf_rcx = 2,
973    gcc_dwarf_rbx = 3,
974    gcc_dwarf_rsi = 4,
975    gcc_dwarf_rdi = 5,
976    gcc_dwarf_rbp = 6,
977    gcc_dwarf_rsp = 7,
978    gcc_dwarf_r8,
979    gcc_dwarf_r9,
980    gcc_dwarf_r10,
981    gcc_dwarf_r11,
982    gcc_dwarf_r12,
983    gcc_dwarf_r13,
984    gcc_dwarf_r14,
985    gcc_dwarf_r15,
986    gcc_dwarf_rip,
987    gcc_dwarf_xmm0,
988    gcc_dwarf_xmm1,
989    gcc_dwarf_xmm2,
990    gcc_dwarf_xmm3,
991    gcc_dwarf_xmm4,
992    gcc_dwarf_xmm5,
993    gcc_dwarf_xmm6,
994    gcc_dwarf_xmm7,
995    gcc_dwarf_xmm8,
996    gcc_dwarf_xmm9,
997    gcc_dwarf_xmm10,
998    gcc_dwarf_xmm11,
999    gcc_dwarf_xmm12,
1000    gcc_dwarf_xmm13,
1001    gcc_dwarf_xmm14,
1002    gcc_dwarf_xmm15,
1003    gcc_dwarf_stmm0,
1004    gcc_dwarf_stmm1,
1005    gcc_dwarf_stmm2,
1006    gcc_dwarf_stmm3,
1007    gcc_dwarf_stmm4,
1008    gcc_dwarf_stmm5,
1009    gcc_dwarf_stmm6,
1010    gcc_dwarf_stmm7,
1011    gcc_dwarf_ymm0 = gcc_dwarf_xmm0,
1012    gcc_dwarf_ymm1 = gcc_dwarf_xmm1,
1013    gcc_dwarf_ymm2 = gcc_dwarf_xmm2,
1014    gcc_dwarf_ymm3 = gcc_dwarf_xmm3,
1015    gcc_dwarf_ymm4 = gcc_dwarf_xmm4,
1016    gcc_dwarf_ymm5 = gcc_dwarf_xmm5,
1017    gcc_dwarf_ymm6 = gcc_dwarf_xmm6,
1018    gcc_dwarf_ymm7 = gcc_dwarf_xmm7,
1019    gcc_dwarf_ymm8 = gcc_dwarf_xmm8,
1020    gcc_dwarf_ymm9 = gcc_dwarf_xmm9,
1021    gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
1022    gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
1023    gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
1024    gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
1025    gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
1026    gcc_dwarf_ymm15 = gcc_dwarf_xmm15
1027};
1028
1029enum gdb_regnums
1030{
1031    gdb_rax     =   0,
1032    gdb_rbx     =   1,
1033    gdb_rcx     =   2,
1034    gdb_rdx     =   3,
1035    gdb_rsi     =   4,
1036    gdb_rdi     =   5,
1037    gdb_rbp     =   6,
1038    gdb_rsp     =   7,
1039    gdb_r8      =   8,
1040    gdb_r9      =   9,
1041    gdb_r10     =  10,
1042    gdb_r11     =  11,
1043    gdb_r12     =  12,
1044    gdb_r13     =  13,
1045    gdb_r14     =  14,
1046    gdb_r15     =  15,
1047    gdb_rip     =  16,
1048    gdb_rflags  =  17,
1049    gdb_cs      =  18,
1050    gdb_ss      =  19,
1051    gdb_ds      =  20,
1052    gdb_es      =  21,
1053    gdb_fs      =  22,
1054    gdb_gs      =  23,
1055    gdb_stmm0   =  24,
1056    gdb_stmm1   =  25,
1057    gdb_stmm2   =  26,
1058    gdb_stmm3   =  27,
1059    gdb_stmm4   =  28,
1060    gdb_stmm5   =  29,
1061    gdb_stmm6   =  30,
1062    gdb_stmm7   =  31,
1063    gdb_fctrl   =  32,  gdb_fcw = gdb_fctrl,
1064    gdb_fstat   =  33,  gdb_fsw = gdb_fstat,
1065    gdb_ftag    =  34,  gdb_ftw = gdb_ftag,
1066    gdb_fiseg   =  35,  gdb_fpu_cs  = gdb_fiseg,
1067    gdb_fioff   =  36,  gdb_ip  = gdb_fioff,
1068    gdb_foseg   =  37,  gdb_fpu_ds  = gdb_foseg,
1069    gdb_fooff   =  38,  gdb_dp  = gdb_fooff,
1070    gdb_fop     =  39,
1071    gdb_xmm0    =  40,
1072    gdb_xmm1    =  41,
1073    gdb_xmm2    =  42,
1074    gdb_xmm3    =  43,
1075    gdb_xmm4    =  44,
1076    gdb_xmm5    =  45,
1077    gdb_xmm6    =  46,
1078    gdb_xmm7    =  47,
1079    gdb_xmm8    =  48,
1080    gdb_xmm9    =  49,
1081    gdb_xmm10   =  50,
1082    gdb_xmm11   =  51,
1083    gdb_xmm12   =  52,
1084    gdb_xmm13   =  53,
1085    gdb_xmm14   =  54,
1086    gdb_xmm15   =  55,
1087    gdb_mxcsr   =  56,
1088    gdb_ymm0    =  gdb_xmm0,
1089    gdb_ymm1    =  gdb_xmm1,
1090    gdb_ymm2    =  gdb_xmm2,
1091    gdb_ymm3    =  gdb_xmm3,
1092    gdb_ymm4    =  gdb_xmm4,
1093    gdb_ymm5    =  gdb_xmm5,
1094    gdb_ymm6    =  gdb_xmm6,
1095    gdb_ymm7    =  gdb_xmm7,
1096    gdb_ymm8    =  gdb_xmm8,
1097    gdb_ymm9    =  gdb_xmm9,
1098    gdb_ymm10   =  gdb_xmm10,
1099    gdb_ymm11   =  gdb_xmm11,
1100    gdb_ymm12   =  gdb_xmm12,
1101    gdb_ymm13   =  gdb_xmm13,
1102    gdb_ymm14   =  gdb_xmm14,
1103    gdb_ymm15   =  gdb_xmm15
1104};
1105
1106#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
1107#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
1108#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
1109#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg)     + offsetof (DNBArchImplX86_64::Context, exc))
1110
1111// This does not accurately identify the location of ymm0...7 in
1112// Context.fpu.avx.  That is because there is a bunch of padding
1113// in Context.fpu.avx that we don't need.  The offset macros lay out
1114// the register state that debugserver transmits to the debugger --
1115// they are not meant to interpret the raw thread_get_state info.
1116#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))
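// For example, AVX_OFFSET_YMM(1) == AVX_OFFSET(xmm7) + 16 + 32: the 32-byte ymm
// values are packed contiguously immediately after xmm7's 16-byte slot in the
// register data that debugserver sends to the debugger.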
1117
1118#define GPR_SIZE(reg)       (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
1119#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
1120#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
1121#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
1122#define FPU_SIZE_YMM(reg)   (32)
1123#define EXC_SIZE(reg)       (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1124
1125// These macros will auto define the register name, alt name, register size,
1126// register offset, encoding, format and native register. This ensures that
1127// the register state structures are defined correctly and have the correct
1128// sizes and offsets.
1129#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg }
1130#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg }
1131#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg }
1132#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg }
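// For example (illustrative expansion), DEFINE_GPR(rax) produces an entry
// equivalent to:
//   { e_regSetGPR, gpr_rax, "rax", NULL, Uint, Hex, GPR_SIZE(rax), GPR_OFFSET(rax),
//     gcc_dwarf_rax, gcc_dwarf_rax, INVALID_NUB_REGNUM, gdb_rax }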
1133
1134// General purpose registers for 64 bit
1135const DNBRegisterInfo
1136DNBArchImplX86_64::g_gpr_registers[] =
1137{
1138    DEFINE_GPR      (rax),
1139    DEFINE_GPR      (rbx),
1140    DEFINE_GPR_ALT  (rcx , "arg4", GENERIC_REGNUM_ARG4),
1141    DEFINE_GPR_ALT  (rdx , "arg3", GENERIC_REGNUM_ARG3),
1142    DEFINE_GPR_ALT  (rdi , "arg1", GENERIC_REGNUM_ARG1),
1143    DEFINE_GPR_ALT  (rsi , "arg2", GENERIC_REGNUM_ARG2),
1144    DEFINE_GPR_ALT  (rbp , "fp"  , GENERIC_REGNUM_FP),
1145    DEFINE_GPR_ALT  (rsp , "sp"  , GENERIC_REGNUM_SP),
1146    DEFINE_GPR_ALT  (r8  , "arg5", GENERIC_REGNUM_ARG5),
1147    DEFINE_GPR_ALT  (r9  , "arg6", GENERIC_REGNUM_ARG6),
1148    DEFINE_GPR      (r10),
1149    DEFINE_GPR      (r11),
1150    DEFINE_GPR      (r12),
1151    DEFINE_GPR      (r13),
1152    DEFINE_GPR      (r14),
1153    DEFINE_GPR      (r15),
1154    DEFINE_GPR_ALT  (rip , "pc", GENERIC_REGNUM_PC),
1155    DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
1156    DEFINE_GPR_ALT2 (cs,        NULL),
1157    DEFINE_GPR_ALT2 (fs,        NULL),
1158    DEFINE_GPR_ALT2 (gs,        NULL),
1159};
1160
1161// Floating point registers 64 bit
1162const DNBRegisterInfo
1163DNBArchImplX86_64::g_fpu_registers_no_avx[] =
1164{
1165    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1, -1, -1, -1 },
1166    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1, -1, -1, -1 },
1167    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1, -1, -1, -1 },
1168    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1, -1, -1, -1 },
1169    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1, -1, -1, -1 },
1170    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1, -1, -1, -1 },
1171    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1, -1, -1, -1 },
1172    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1, -1, -1, -1 },
1173    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1174    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1175
1176    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
1177    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
1178    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
1179    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
1180    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
1181    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
1182    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
1183    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },
1184
1185    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
1186    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
1187    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
1188    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
1189    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
1190    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
1191    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
1192    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
1193    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8  },
1194    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9  },
1195    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
1196    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
1197    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
1198    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
1199    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
1200    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
1201};
1202
1203const DNBRegisterInfo
1204DNBArchImplX86_64::g_fpu_registers_avx[] =
1205{
1206    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1, -1, -1, -1 },
1207    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1, -1, -1, -1 },
1208    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1, -1, -1, -1 },
1209    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1, -1, -1, -1 },
1210    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1, -1, -1, -1 },
1211    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1, -1, -1, -1 },
1212    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1, -1, -1, -1 },
1213    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1, -1, -1, -1 },
1214    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1215    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1216
1217    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
1218    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
1219    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
1220    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
1221    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
1222    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
1223    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
1224    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },
1225
1226    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
1227    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
1228    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
1229    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
1230    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
1231    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
1232    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
1233    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
1234    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8  },
1235    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9  },
1236    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
1237    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
1238    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
1239    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
1240    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
1241    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
1242
1243    { e_regSetFPU, fpu_ymm0 , "ymm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)   , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1, gdb_ymm0 },
1244    { e_regSetFPU, fpu_ymm1 , "ymm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)   , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1, gdb_ymm1 },
1245    { e_regSetFPU, fpu_ymm2 , "ymm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)   , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1, gdb_ymm2 },
1246    { e_regSetFPU, fpu_ymm3 , "ymm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)   , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1, gdb_ymm3 },
1247    { e_regSetFPU, fpu_ymm4 , "ymm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)   , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1, gdb_ymm4 },
1248    { e_regSetFPU, fpu_ymm5 , "ymm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)   , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1, gdb_ymm5 },
1249    { e_regSetFPU, fpu_ymm6 , "ymm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)   , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1, gdb_ymm6 },
1250    { e_regSetFPU, fpu_ymm7 , "ymm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)   , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1, gdb_ymm7 },
1251    { e_regSetFPU, fpu_ymm8 , "ymm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)   , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1, gdb_ymm8  },
1252    { e_regSetFPU, fpu_ymm9 , "ymm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)   , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1, gdb_ymm9  },
1253    { e_regSetFPU, fpu_ymm10, "ymm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10)  , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1, gdb_ymm10 },
1254    { e_regSetFPU, fpu_ymm11, "ymm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11)  , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1, gdb_ymm11 },
1255    { e_regSetFPU, fpu_ymm12, "ymm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12)  , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1, gdb_ymm12 },
1256    { e_regSetFPU, fpu_ymm13, "ymm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13)  , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1, gdb_ymm13 },
1257    { e_regSetFPU, fpu_ymm14, "ymm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14)  , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1, gdb_ymm14 },
1258    { e_regSetFPU, fpu_ymm15, "ymm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15)  , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1, gdb_ymm15 }
1259};
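
// Note: the AVX table above exposes each ymm register as a single 32-byte
// value. GetRegisterValue()/SetRegisterValue() below assemble that value from
// the low 16 bytes in __fpu_xmmN and the high 16 bytes in __fpu_ymmhN.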
1260
1261// Exception registers
1262
1263const DNBRegisterInfo
1264DNBArchImplX86_64::g_exc_registers[] =
1265{
1266    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)       , -1, -1, -1, -1 },
1267    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)          , -1, -1, -1, -1 },
1268    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr)   , -1, -1, -1, -1 }
1269};
1270
1271// Number of registers in each register set
1272const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
1273const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
1274const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
1275const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
1276const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
1277const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
1278
1279//----------------------------------------------------------------------
1280// Register set definitions. The first definition, at register set index
1281// zero, is for all registers; it is followed by the other register sets.
1282// The register information for the "all registers" set need not be filled in.
1283//----------------------------------------------------------------------
1284const DNBRegisterSetInfo
1285DNBArchImplX86_64::g_reg_sets_no_avx[] =
1286{
1287    { "x86_64 Registers",           NULL,               k_num_all_registers_no_avx },
1288    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1289    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
1290    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1291};
1292
1293const DNBRegisterSetInfo
1294DNBArchImplX86_64::g_reg_sets_avx[] =
1295{
1296    { "x86_64 Registers",           NULL,               k_num_all_registers_avx },
1297    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1298    { "Floating Point Registers",   g_fpu_registers_avx, k_num_fpu_registers_avx },
1299    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1300};
1301
1302// Total number of register sets for this architecture
1303const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);
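// Both g_reg_sets_avx and g_reg_sets_no_avx define the same four sets, so the
// count taken from the AVX table applies to either table.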
1304
1305
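// Factory callback registered with DNBArchProtocol; creates the x86_64
// register-context implementation for a single MachThread.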
1306DNBArchProtocol *
1307DNBArchImplX86_64::Create (MachThread *thread)
1308{
1309    return new DNBArchImplX86_64 (thread);
1310}
1311
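// Return the software breakpoint opcode for the requested size. x86_64 uses
// the one-byte INT3 instruction (0xCC); any other size is unsupported.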
1312const uint8_t * const
1313DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
1314{
1315    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
1316    if (byte_size == 1)
1317        return g_breakpoint_opcode;
1318    return NULL;
1319}
1320
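// Return the register set table that matches this machine: the AVX table when
// CPUHasAVX() reports AVX support or FORCE_AVX_REGS is enabled, the plain FPU
// table otherwise.
//
// A minimal usage sketch (illustrative only; the member names are inferred
// from the aggregate initializers above and assumed to be
// name/registers/num_registers):
//
//   nub_size_t num_sets = 0;
//   const DNBRegisterSetInfo *sets = DNBArchImplX86_64::GetRegisterSetInfo (&num_sets);
//   for (nub_size_t set = 1; set < num_sets; ++set)       // index 0 is "all registers"
//       for (nub_size_t reg = 0; reg < sets[set].num_registers; ++reg)
//           printf ("%s/%s\n", sets[set].name, sets[set].registers[reg].name);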
1321const DNBRegisterSetInfo *
1322DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
1323{
1324    *num_reg_sets = k_num_register_sets;
1325
1326    if (CPUHasAVX() || FORCE_AVX_REGS)
1327        return g_reg_sets_avx;
1328    else
1329        return g_reg_sets_no_avx;
1330}
1331
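// Describe this plug-in to the generic DNBArchProtocol layer: the CPU type it
// handles plus the callbacks that create per-thread instances, describe the
// register sets, and supply the software breakpoint opcode.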
1332void
1333DNBArchImplX86_64::Initialize()
1334{
1335    DNBArchPluginInfo arch_plugin_info =
1336    {
1337        CPU_TYPE_X86_64,
1338        DNBArchImplX86_64::Create,
1339        DNBArchImplX86_64::GetRegisterSetInfo,
1340        DNBArchImplX86_64::SoftwareBreakpointOpcode
1341    };
1342
1343    // Register this arch plug-in with the main protocol class
1344    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
1345}
1346
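// Read one register out of the cached thread state. Generic register numbers
// (PC, SP, FP, flags) are first remapped onto the corresponding GPR; FPU/AVX
// values are copied from whichever variant of the FPU state is in use.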
1347bool
1348DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
1349{
1350    if (set == REGISTER_SET_GENERIC)
1351    {
1352        switch (reg)
1353        {
1354            case GENERIC_REGNUM_PC:     // Program Counter
1355                set = e_regSetGPR;
1356                reg = gpr_rip;
1357                break;
1358
1359            case GENERIC_REGNUM_SP:     // Stack Pointer
1360                set = e_regSetGPR;
1361                reg = gpr_rsp;
1362                break;
1363
1364            case GENERIC_REGNUM_FP:     // Frame Pointer
1365                set = e_regSetGPR;
1366                reg = gpr_rbp;
1367                break;
1368
1369            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1370                set = e_regSetGPR;
1371                reg = gpr_rflags;
1372                break;
1373
1374            case GENERIC_REGNUM_RA:     // Return Address
1375            default:
1376                return false;
1377        }
1378    }
1379
1380    if (GetRegisterState(set, false) != KERN_SUCCESS)
1381        return false;
1382
1383    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1384    if (regInfo)
1385    {
1386        value->info = *regInfo;
1387        switch (set)
1388        {
1389            case e_regSetGPR:
1390                if (reg < k_num_gpr_registers)
1391                {
1392                    value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
1393                    return true;
1394                }
1395                break;
1396
1397            case e_regSetFPU:
1398                if (CPUHasAVX() || FORCE_AVX_REGS)
1399                {
1400                    switch (reg)
1401                    {
1402                    case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
1403                    case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
1404                    case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
1405                    case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
1406                    case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
1407                    case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
1408                    case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
1409                    case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
1410                    case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
1411                    case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;
1412
1413                    case fpu_stmm0:
1414                    case fpu_stmm1:
1415                    case fpu_stmm2:
1416                    case fpu_stmm3:
1417                    case fpu_stmm4:
1418                    case fpu_stmm5:
1419                    case fpu_stmm6:
1420                    case fpu_stmm7:
1421                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1422                        return true;
1423
1424                    case fpu_xmm0:
1425                    case fpu_xmm1:
1426                    case fpu_xmm2:
1427                    case fpu_xmm3:
1428                    case fpu_xmm4:
1429                    case fpu_xmm5:
1430                    case fpu_xmm6:
1431                    case fpu_xmm7:
1432                    case fpu_xmm8:
1433                    case fpu_xmm9:
1434                    case fpu_xmm10:
1435                    case fpu_xmm11:
1436                    case fpu_xmm12:
1437                    case fpu_xmm13:
1438                    case fpu_xmm14:
1439                    case fpu_xmm15:
1440                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1441                        return true;
1442
1443                    case fpu_ymm0:
1444                    case fpu_ymm1:
1445                    case fpu_ymm2:
1446                    case fpu_ymm3:
1447                    case fpu_ymm4:
1448                    case fpu_ymm5:
1449                    case fpu_ymm6:
1450                    case fpu_ymm7:
1451                    case fpu_ymm8:
1452                    case fpu_ymm9:
1453                    case fpu_ymm10:
1454                    case fpu_ymm11:
1455                    case fpu_ymm12:
1456                    case fpu_ymm13:
1457                    case fpu_ymm14:
1458                    case fpu_ymm15:
1459                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
1460                        memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
1461                        return true;
1462                    }
1463                }
1464                else
1465                {
1466                    switch (reg)
1467                    {
1468                        case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
1469                        case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
1470                        case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
1471                        case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
1472                        case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
1473                        case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
1474                        case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
1475                        case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
1476                        case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
1477                        case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;
1478
1479                        case fpu_stmm0:
1480                        case fpu_stmm1:
1481                        case fpu_stmm2:
1482                        case fpu_stmm3:
1483                        case fpu_stmm4:
1484                        case fpu_stmm5:
1485                        case fpu_stmm6:
1486                        case fpu_stmm7:
1487                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1488                            return true;
1489
1490                        case fpu_xmm0:
1491                        case fpu_xmm1:
1492                        case fpu_xmm2:
1493                        case fpu_xmm3:
1494                        case fpu_xmm4:
1495                        case fpu_xmm5:
1496                        case fpu_xmm6:
1497                        case fpu_xmm7:
1498                        case fpu_xmm8:
1499                        case fpu_xmm9:
1500                        case fpu_xmm10:
1501                        case fpu_xmm11:
1502                        case fpu_xmm12:
1503                        case fpu_xmm13:
1504                        case fpu_xmm14:
1505                        case fpu_xmm15:
1506                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1507                            return true;
1508                    }
1509                }
1510                break;
1511
1512            case e_regSetEXC:
1513                switch (reg)
1514                {
1515                case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno; return true;
1516                case exc_err:       value->value.uint32 = m_state.context.exc.__err; return true;
1517                case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
1518                }
1519                break;
1520        }
1521    }
1522    return false;
1523}
1524
1525
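// Write one register: refresh the cached state for the register set, patch the
// new value into it, then push the modified set back to the thread via
// SetRegisterState().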
1526bool
1527DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
1528{
1529    if (set == REGISTER_SET_GENERIC)
1530    {
1531        switch (reg)
1532        {
1533            case GENERIC_REGNUM_PC:     // Program Counter
1534                set = e_regSetGPR;
1535                reg = gpr_rip;
1536                break;
1537
1538            case GENERIC_REGNUM_SP:     // Stack Pointer
1539                set = e_regSetGPR;
1540                reg = gpr_rsp;
1541                break;
1542
1543            case GENERIC_REGNUM_FP:     // Frame Pointer
1544                set = e_regSetGPR;
1545                reg = gpr_rbp;
1546                break;
1547
1548            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1549                set = e_regSetGPR;
1550                reg = gpr_rflags;
1551                break;
1552
1553            case GENERIC_REGNUM_RA:     // Return Address
1554            default:
1555                return false;
1556        }
1557    }
1558
1559    if (GetRegisterState(set, false) != KERN_SUCCESS)
1560        return false;
1561
1562    bool success = false;
1563    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1564    if (regInfo)
1565    {
1566        switch (set)
1567        {
1568            case e_regSetGPR:
1569                if (reg < k_num_gpr_registers)
1570                {
1571                    ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
1572                    success = true;
1573                }
1574                break;
1575
1576            case e_regSetFPU:
1577                if (CPUHasAVX() || FORCE_AVX_REGS)
1578                {
1579                    switch (reg)
1580                    {
1581                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1582                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1583                    case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1584                    case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
1585                    case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
1586                    case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
1587                    case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
1588                    case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
1589                    case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1590                    case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1591
1592                    case fpu_stmm0:
1593                    case fpu_stmm1:
1594                    case fpu_stmm2:
1595                    case fpu_stmm3:
1596                    case fpu_stmm4:
1597                    case fpu_stmm5:
1598                    case fpu_stmm6:
1599                    case fpu_stmm7:
1600                        memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1601                        success = true;
1602                        break;
1603
1604                    case fpu_xmm0:
1605                    case fpu_xmm1:
1606                    case fpu_xmm2:
1607                    case fpu_xmm3:
1608                    case fpu_xmm4:
1609                    case fpu_xmm5:
1610                    case fpu_xmm6:
1611                    case fpu_xmm7:
1612                    case fpu_xmm8:
1613                    case fpu_xmm9:
1614                    case fpu_xmm10:
1615                    case fpu_xmm11:
1616                    case fpu_xmm12:
1617                    case fpu_xmm13:
1618                    case fpu_xmm14:
1619                    case fpu_xmm15:
1620                        memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1621                        success = true;
1622                        break;
1623
1624                    case fpu_ymm0:
1625                    case fpu_ymm1:
1626                    case fpu_ymm2:
1627                    case fpu_ymm3:
1628                    case fpu_ymm4:
1629                    case fpu_ymm5:
1630                    case fpu_ymm6:
1631                    case fpu_ymm7:
1632                    case fpu_ymm8:
1633                    case fpu_ymm9:
1634                    case fpu_ymm10:
1635                    case fpu_ymm11:
1636                    case fpu_ymm12:
1637                    case fpu_ymm13:
1638                    case fpu_ymm14:
1639                    case fpu_ymm15:
1640                        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
1641                        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
1642                        success = true; break;  // let SetRegisterState() below push the change to the thread
1643                    }
1644                }
1645                else
1646                {
1647                    switch (reg)
1648                    {
1649                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1650                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1651                    case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1652                    case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
1653                    case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
1654                    case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
1655                    case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
1656                    case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
1657                    case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1658                    case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1659
1660                    case fpu_stmm0:
1661                    case fpu_stmm1:
1662                    case fpu_stmm2:
1663                    case fpu_stmm3:
1664                    case fpu_stmm4:
1665                    case fpu_stmm5:
1666                    case fpu_stmm6:
1667                    case fpu_stmm7:
1668                        memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1669                        success = true;
1670                        break;
1671
1672                    case fpu_xmm0:
1673                    case fpu_xmm1:
1674                    case fpu_xmm2:
1675                    case fpu_xmm3:
1676                    case fpu_xmm4:
1677                    case fpu_xmm5:
1678                    case fpu_xmm6:
1679                    case fpu_xmm7:
1680                    case fpu_xmm8:
1681                    case fpu_xmm9:
1682                    case fpu_xmm10:
1683                    case fpu_xmm11:
1684                    case fpu_xmm12:
1685                    case fpu_xmm13:
1686                    case fpu_xmm14:
1687                    case fpu_xmm15:
1688                        memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1689                        success = true;
1690                        break;
1691                    }
1692                }
1693                break;
1694
1695            case e_regSetEXC:
1696                switch (reg)
1697                {
1698                case exc_trapno:    m_state.context.exc.__trapno = value->value.uint32;     success = true; break;
1699                case exc_err:       m_state.context.exc.__err = value->value.uint32;        success = true; break;
1700                case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
1701                }
1702                break;
1703        }
1704    }
1705
1706    if (success)
1707        return SetRegisterState(set) == KERN_SUCCESS;
1708    return false;
1709}
1710
1711
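// Copy the complete register context (GPR, FPU/AVX, and EXC state) into 'buf'.
// A NULL buffer or zero length simply reports the size the context requires.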
1712nub_size_t
1713DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
1714{
1715    nub_size_t size = sizeof (m_state.context);
1716
1717    if (buf && buf_len)
1718    {
1719        if (size > buf_len)
1720            size = buf_len;
1721
1722        bool force = false;
1723        kern_return_t kret;
1724        if ((kret = GetGPRState(force)) != KERN_SUCCESS)
1725        {
1726            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to read: %u ", buf, buf_len, kret);
1727            size = 0;
1728        }
1729        else
1730        if ((kret = GetFPUState(force)) != KERN_SUCCESS)
1731        {
1732            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: %s regs failed to read: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1733            size = 0;
1734        }
1735        else
1736        if ((kret = GetEXCState(force)) != KERN_SUCCESS)
1737        {
1738            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to read: %u", buf, buf_len, kret);
1739            size = 0;
1740        }
1741        else
1742        {
1743            // Success
1744            ::memcpy (buf, &m_state.context, size);
1745        }
1746    }
1747    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1748    // Return the size of the register context even if NULL was passed in
1749    return size;
1750}
1751
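// Restore a previously captured register context from 'buf' and write every
// register set back to the thread, logging any set that fails to write.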
1752nub_size_t
1753DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
1754{
1755    nub_size_t size = sizeof (m_state.context);
1756    if (buf == NULL || buf_len == 0)
1757        size = 0;
1758
1759    if (size)
1760    {
1761        if (size > buf_len)
1762            size = buf_len;
1763
1764        ::memcpy (&m_state.context, buf, size);
1765        kern_return_t kret;
1766        if ((kret = SetGPRState()) != KERN_SUCCESS)
1767            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to write: %u", buf, buf_len, kret);
1768        if ((kret = SetFPUState()) != KERN_SUCCESS)
1769            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: %s regs failed to write: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1770        if ((kret = SetEXCState()) != KERN_SUCCESS)
1771            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to write: %u", buf, buf_len, kret);
1772    }
1773    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1774    return size;
1775}
1776
1777
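// Read the requested register set (or all of them for e_regSetALL) from the
// thread into the cached state; 'force' re-reads even if the cache is valid.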
1778kern_return_t
1779DNBArchImplX86_64::GetRegisterState(int set, bool force)
1780{
1781    switch (set)
1782    {
1783        case e_regSetALL:    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
1784        case e_regSetGPR:    return GetGPRState(force);
1785        case e_regSetFPU:    return GetFPUState(force);
1786        case e_regSetEXC:    return GetEXCState(force);
1787        default: break;
1788    }
1789    return KERN_INVALID_ARGUMENT;
1790}
1791
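// Write the requested register set (or all of them) from the cached state back
// to the thread. Fails with KERN_INVALID_ARGUMENT if the cached copy was never
// successfully read.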
1792kern_return_t
1793DNBArchImplX86_64::SetRegisterState(int set)
1794{
1795    // Make sure we have a valid context to set.
1796    if (RegisterSetStateIsValid(set))
1797    {
1798        switch (set)
1799        {
1800            case e_regSetALL:    return SetGPRState() | SetFPUState() | SetEXCState();
1801            case e_regSetGPR:    return SetGPRState();
1802            case e_regSetFPU:    return SetFPUState();
1803            case e_regSetEXC:    return SetEXCState();
1804            default: break;
1805        }
1806    }
1807    return KERN_INVALID_ARGUMENT;
1808}
1809
1810bool
1811DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
1812{
1813    return m_state.RegsAreValid(set);
1814}
1815
1816
1817
1818#endif    // #if defined (__i386__) || defined (__x86_64__)
1819