DNBArchImplX86_64.cpp revision 5693e875ac7ac8d6ce1614da40e0b56f3f58716c
1//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  Created by Greg Clayton on 6/25/07.
11//
12//===----------------------------------------------------------------------===//
13
14#if defined (__i386__) || defined (__x86_64__)
15
16#include <sys/cdefs.h>
17#include <sys/types.h>
18#include <sys/sysctl.h>
19
20#include "MacOSX/x86_64/DNBArchImplX86_64.h"
21#include "DNBLog.h"
22#include "MachThread.h"
23#include "MachProcess.h"
24#include <mach/mach.h>
25#include <stdlib.h>
26
27#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
28enum debugState {
29    debugStateUnknown,
30    debugStateOff,
31    debugStateOn
32};
33
34static debugState sFPUDebugState = debugStateUnknown;
35static debugState sAVXForceState = debugStateUnknown;
36
37static bool DebugFPURegs ()
38{
39    if (sFPUDebugState == debugStateUnknown)
40    {
41        if (getenv("DNB_DEBUG_FPU_REGS"))
42            sFPUDebugState = debugStateOn;
43        else
44            sFPUDebugState = debugStateOff;
45    }
46
47    return (sFPUDebugState == debugStateOn);
48}
49
50static bool ForceAVXRegs ()
51{
52    if (sAVXForceState == debugStateUnknown)
53    {
54        if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
55            sAVXForceState = debugStateOn;
56        else
57            sAVXForceState = debugStateOff;
58    }
59
60    return (sAVXForceState == debugStateOn);
61}
62
63#define DEBUG_FPU_REGS (DebugFPURegs())
64#define FORCE_AVX_REGS (ForceAVXRegs())
65#else
66#define DEBUG_FPU_REGS (0)
67#define FORCE_AVX_REGS (0)
68#endif
69
70
71extern "C" bool
72CPUHasAVX()
73{
74    enum AVXPresence
75    {
76        eAVXUnknown     = -1,
77        eAVXNotPresent  =  0,
78        eAVXPresent     =  1
79    };
80
81    static AVXPresence g_has_avx = eAVXUnknown;
82    if (g_has_avx == eAVXUnknown)
83    {
84        g_has_avx = eAVXNotPresent;
85
86        // Only xnu-2020 or later has AVX support; any version before that
87        // has a busted thread_get_state RPC that truncates the thread state
88        // buffer (<rdar://problem/10122874>), so we need to verify the kernel
89        // version number manually before enabling AVX support.
90        int mib[2];
91        char buffer[1024];
92        size_t length = sizeof(buffer);
93        uint64_t xnu_version = 0;
94        mib[0] = CTL_KERN;
95        mib[1] = KERN_VERSION;
96        int err = ::sysctl(mib, 2, &buffer, &length, NULL, 0);
97        if (err == 0)
98        {
99            const char *xnu = strstr (buffer, "xnu-");
100            if (xnu)
101            {
102                const char *xnu_version_cstr = xnu + 4;
103                xnu_version = strtoull (xnu_version_cstr, NULL, 0);
104                if (xnu_version >= 2020 && xnu_version != ULLONG_MAX)
105                {
106                    if (::HasAVX())
107                    {
108                        g_has_avx = eAVXPresent;
109                    }
110                }
111            }
112        }
113        DNBLogThreadedIf (LOG_THREAD, "CPUHasAVX(): g_has_avx = %i (err = %i, errno = %i, xnu_version = %llu)\n", g_has_avx, err, errno, xnu_version);
114    }
115
116    return (g_has_avx == eAVXPresent);
117}
118
119uint64_t
120DNBArchImplX86_64::GetPC(uint64_t failValue)
121{
122    // Get program counter
123    if (GetGPRState(false) == KERN_SUCCESS)
124        return m_state.context.gpr.__rip;
125    return failValue;
126}
127
128kern_return_t
129DNBArchImplX86_64::SetPC(uint64_t value)
130{
131    // Update the program counter in the GPR state and write it back
132    kern_return_t err = GetGPRState(false);
133    if (err == KERN_SUCCESS)
134    {
135        m_state.context.gpr.__rip = value;
136        err = SetGPRState();
137    }
138    return err == KERN_SUCCESS;
139}
140
141uint64_t
142DNBArchImplX86_64::GetSP(uint64_t failValue)
143{
144    // Get stack pointer
145    if (GetGPRState(false) == KERN_SUCCESS)
146        return m_state.context.gpr.__rsp;
147    return failValue;
148}
149
150// Uncomment the value below to verify the values in the debugger.
151//#define DEBUG_GPR_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
152
153kern_return_t
154DNBArchImplX86_64::GetGPRState(bool force)
155{
156    if (force || m_state.GetError(e_regSetGPR, Read))
157    {
158        kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
159        DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
160
161#if DEBUG_GPR_VALUES
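        // Fill each register with the ASCII codes of its own name (for example,
        // rax = ('a' << 8) + 'x' == 0x6178) so recognizable dummy values show up
        // when verifying the register plumbing in the debugger.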
162        m_state.context.gpr.__rax = ('a' << 8) + 'x';
163        m_state.context.gpr.__rbx = ('b' << 8) + 'x';
164        m_state.context.gpr.__rcx = ('c' << 8) + 'x';
165        m_state.context.gpr.__rdx = ('d' << 8) + 'x';
166        m_state.context.gpr.__rdi = ('d' << 8) + 'i';
167        m_state.context.gpr.__rsi = ('s' << 8) + 'i';
168        m_state.context.gpr.__rbp = ('b' << 8) + 'p';
169        m_state.context.gpr.__rsp = ('s' << 8) + 'p';
170        m_state.context.gpr.__r8  = ('r' << 8) + '8';
171        m_state.context.gpr.__r9  = ('r' << 8) + '9';
172        m_state.context.gpr.__r10 = ('r' << 8) + 'a';
173        m_state.context.gpr.__r11 = ('r' << 8) + 'b';
174        m_state.context.gpr.__r12 = ('r' << 8) + 'c';
175        m_state.context.gpr.__r13 = ('r' << 8) + 'd';
176        m_state.context.gpr.__r14 = ('r' << 8) + 'e';
177        m_state.context.gpr.__r15 = ('r' << 8) + 'f';
178        m_state.context.gpr.__rip = ('i' << 8) + 'p';
179        m_state.context.gpr.__rflags = ('f' << 8) + 'l';
180        m_state.context.gpr.__cs = ('c' << 8) + 's';
181        m_state.context.gpr.__fs = ('f' << 8) + 's';
182        m_state.context.gpr.__gs = ('g' << 8) + 's';
183        m_state.SetError(e_regSetGPR, Read, 0);
184#else
185        mach_msg_type_number_t count = e_regSetWordSizeGPR;
186        m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
187        DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
188                          "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
189                          "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
190                          "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
191                          "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
192                          "\n\trip = %16.16llx"
193                          "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
194                          m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
195                          m_state.GetError(e_regSetGPR, Read),
196                          m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
197                          m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
198                          m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
199                          m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
200                          m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
201                          m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
202                          m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);
203
204        //      DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
205        //                        "\n\trax = %16.16llx"
206        //                        "\n\trbx = %16.16llx"
207        //                        "\n\trcx = %16.16llx"
208        //                        "\n\trdx = %16.16llx"
209        //                        "\n\trdi = %16.16llx"
210        //                        "\n\trsi = %16.16llx"
211        //                        "\n\trbp = %16.16llx"
212        //                        "\n\trsp = %16.16llx"
213        //                        "\n\t r8 = %16.16llx"
214        //                        "\n\t r9 = %16.16llx"
215        //                        "\n\tr10 = %16.16llx"
216        //                        "\n\tr11 = %16.16llx"
217        //                        "\n\tr12 = %16.16llx"
218        //                        "\n\tr13 = %16.16llx"
219        //                        "\n\tr14 = %16.16llx"
220        //                        "\n\tr15 = %16.16llx"
221        //                        "\n\trip = %16.16llx"
222        //                        "\n\tflg = %16.16llx"
223        //                        "\n\t cs = %16.16llx"
224        //                        "\n\t fs = %16.16llx"
225        //                        "\n\t gs = %16.16llx",
226        //                        m_thread->ThreadID(),
227        //                        x86_THREAD_STATE64,
228        //                        x86_THREAD_STATE64_COUNT,
229        //                        m_state.GetError(e_regSetGPR, Read),
230        //                        m_state.context.gpr.__rax,
231        //                        m_state.context.gpr.__rbx,
232        //                        m_state.context.gpr.__rcx,
233        //                        m_state.context.gpr.__rdx,
234        //                        m_state.context.gpr.__rdi,
235        //                        m_state.context.gpr.__rsi,
236        //                        m_state.context.gpr.__rbp,
237        //                        m_state.context.gpr.__rsp,
238        //                        m_state.context.gpr.__r8,
239        //                        m_state.context.gpr.__r9,
240        //                        m_state.context.gpr.__r10,
241        //                        m_state.context.gpr.__r11,
242        //                        m_state.context.gpr.__r12,
243        //                        m_state.context.gpr.__r13,
244        //                        m_state.context.gpr.__r14,
245        //                        m_state.context.gpr.__r15,
246        //                        m_state.context.gpr.__rip,
247        //                        m_state.context.gpr.__rflags,
248        //                        m_state.context.gpr.__cs,
249        //                        m_state.context.gpr.__fs,
250        //                        m_state.context.gpr.__gs);
251#endif
252    }
253    return m_state.GetError(e_regSetGPR, Read);
254}
255
256// Uncomment the value below to verify the values in the debugger.
257//#define DEBUG_FPU_REGS 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
258
259kern_return_t
260DNBArchImplX86_64::GetFPUState(bool force)
261{
262    if (force || m_state.GetError(e_regSetFPU, Read))
263    {
264        if (DEBUG_FPU_REGS) {
265            if (CPUHasAVX() || FORCE_AVX_REGS)
266            {
267                m_state.context.fpu.avx.__fpu_reserved[0] = -1;
268                m_state.context.fpu.avx.__fpu_reserved[1] = -1;
269                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
270                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
271                m_state.context.fpu.avx.__fpu_ftw = 1;
272                m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
273                m_state.context.fpu.avx.__fpu_fop = 2;
274                m_state.context.fpu.avx.__fpu_ip = 3;
275                m_state.context.fpu.avx.__fpu_cs = 4;
276                m_state.context.fpu.avx.__fpu_rsrv2 = 5;
277                m_state.context.fpu.avx.__fpu_dp = 6;
278                m_state.context.fpu.avx.__fpu_ds = 7;
279                m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
280                m_state.context.fpu.avx.__fpu_mxcsr = 8;
281                m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
282                int i;
283                for (i=0; i<16; ++i)
284                {
285                    if (i<10)
286                    {
287                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
288                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
289                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
290                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
291                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
292                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
293                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
294                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
295                    }
296                    else
297                    {
298                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
299                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
300                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
301                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
302                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
303                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
304                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
305                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
306                    }
307
308                    m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
309                    m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
310                    m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
311                    m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
312                    m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
313                    m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
314                    m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
315                    m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
316                    m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8';
317                    m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9';
318                    m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A';
319                    m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B';
320                    m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C';
321                    m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D';
322                    m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E';
323                    m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F';
324
325                    m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
326                    m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
327                    m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
328                    m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
329                    m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
330                    m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
331                    m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
332                    m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
333                    m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8';
334                    m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9';
335                    m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A';
336                    m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B';
337                    m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C';
338                    m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D';
339                    m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E';
340                    m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F';
341                }
342                for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
343                    m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
344                m_state.context.fpu.avx.__fpu_reserved1 = -1;
345                for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
346                    m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
347                m_state.SetError(e_regSetFPU, Read, 0);
348            }
349            else
350            {
351                m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
352                m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
353                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
354                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
355                m_state.context.fpu.no_avx.__fpu_ftw = 1;
356                m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
357                m_state.context.fpu.no_avx.__fpu_fop = 2;
358                m_state.context.fpu.no_avx.__fpu_ip = 3;
359                m_state.context.fpu.no_avx.__fpu_cs = 4;
360                m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
361                m_state.context.fpu.no_avx.__fpu_dp = 6;
362                m_state.context.fpu.no_avx.__fpu_ds = 7;
363                m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
364                m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
365                m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
366                int i;
367                for (i=0; i<16; ++i)
368                {
369                    if (i<10)
370                    {
371                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
372                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
373                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
374                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
375                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
376                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
377                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
378                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
379                    }
380                    else
381                    {
382                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
383                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
384                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
385                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
386                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
387                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
388                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
389                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
390                    }
391
392                    m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
393                    m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
394                    m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
395                    m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
396                    m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
397                    m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
398                    m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
399                    m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
400                    m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
401                    m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
402                    m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
403                    m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
404                    m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
405                    m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
406                    m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
407                    m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
408                }
409                for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
410                    m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
411                m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
412                m_state.SetError(e_regSetFPU, Read, 0);
413            }
414        }
415        else
416        {
417            if (CPUHasAVX() || FORCE_AVX_REGS)
418            {
419                mach_msg_type_number_t count = e_regSetWordSizeAVX;
420                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
421                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x",
422                                  m_thread->ThreadID(), __x86_64_AVX_STATE, (uint32_t)count,
423                                  e_regSetWordSizeAVX, m_state.GetError(e_regSetFPU, Read));
424            }
425            else
426            {
427                mach_msg_type_number_t count = e_regSetWordSizeFPU;
428                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
429                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in)) => 0x%8.8x",
430                                  m_thread->ThreadID(), __x86_64_FLOAT_STATE, (uint32_t)count,
431                                  e_regSetWordSizeFPU, m_state.GetError(e_regSetFPU, Read));
432            }
433        }
434    }
435    return m_state.GetError(e_regSetFPU, Read);
436}
437
438kern_return_t
439DNBArchImplX86_64::GetEXCState(bool force)
440{
441    if (force || m_state.GetError(e_regSetEXC, Read))
442    {
443        mach_msg_type_number_t count = e_regSetWordSizeEXC;
444        m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
445    }
446    return m_state.GetError(e_regSetEXC, Read);
447}
448
449kern_return_t
450DNBArchImplX86_64::SetGPRState()
451{
452    kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
453    DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
454
455    m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
456    DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
457                      "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
458                      "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
459                      "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
460                      "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
461                      "\n\trip = %16.16llx"
462                      "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
463                      m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
464                      m_state.GetError(e_regSetGPR, Write),
465                      m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
466                      m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
467                      m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
468                      m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
469                      m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
470                      m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
471                      m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
472    return m_state.GetError(e_regSetGPR, Write);
473}
474
475kern_return_t
476DNBArchImplX86_64::SetFPUState()
477{
478    if (DEBUG_FPU_REGS)
479    {
480        m_state.SetError(e_regSetFPU, Write, 0);
481        return m_state.GetError(e_regSetFPU, Write);
482    }
483    else
484    {
485        if (CPUHasAVX() || FORCE_AVX_REGS)
486        {
487            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
488            return m_state.GetError(e_regSetFPU, Write);
489        }
490        else
491        {
492            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
493            return m_state.GetError(e_regSetFPU, Write);
494        }
495    }
496}
497
498kern_return_t
499DNBArchImplX86_64::SetEXCState()
500{
501    m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
502    return m_state.GetError(e_regSetEXC, Write);
503}
504
505kern_return_t
506DNBArchImplX86_64::GetDBGState(bool force)
507{
508    if (force || m_state.GetError(e_regSetDBG, Read))
509    {
510        mach_msg_type_number_t count = e_regSetWordSizeDBG;
511        m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
512    }
513    return m_state.GetError(e_regSetDBG, Read);
514}
515
516kern_return_t
517DNBArchImplX86_64::SetDBGState()
518{
519    m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
520    return m_state.GetError(e_regSetDBG, Write);
521}
522
523void
524DNBArchImplX86_64::ThreadWillResume()
525{
526    // Do we need to step this thread? If so, let the mach thread tell us so.
527    if (m_thread->IsStepping())
528    {
529        // This is the primary thread, let the arch do anything it needs
530        EnableHardwareSingleStep(true);
531    }
532
533    // Reset the debug status register, if necessary, before we resume.
534    kern_return_t kret = GetDBGState(false);
535    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
536    if (kret != KERN_SUCCESS)
537        return;
538
539    DBG &debug_state = m_state.context.dbg;
540    bool need_reset = false;
541    uint32_t i, num = NumSupportedHardwareWatchpoints();
542    for (i = 0; i < num; ++i)
543        if (IsWatchpointHit(debug_state, i))
544            need_reset = true;
545
546    if (need_reset)
547    {
548        ClearWatchpointHits(debug_state);
549        kret = SetDBGState();
550        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
551    }
552}
553
554bool
555DNBArchImplX86_64::ThreadDidStop()
556{
557    bool success = true;
558
559    m_state.InvalidateAllRegisterStates();
560
561    // Are we stepping a single instruction?
562    if (GetGPRState(true) == KERN_SUCCESS)
563    {
564        // We are single stepping, was this the primary thread?
565        if (m_thread->IsStepping())
566        {
567            // This was the primary thread, we need to clear the trace
568            // bit if so.
569            success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
570        }
571        else
572        {
573            // The MachThread will automatically restore the suspend count
574            // in ThreadDidStop(), so we don't need to do anything here if
575            // we weren't the primary thread the last time
576        }
577    }
578    return success;
579}
580
581bool
582DNBArchImplX86_64::NotifyException(MachException::Data& exc)
583{
584    switch (exc.exc_type)
585    {
586        case EXC_BAD_ACCESS:
587            break;
588        case EXC_BAD_INSTRUCTION:
589            break;
590        case EXC_ARITHMETIC:
591            break;
592        case EXC_EMULATION:
593            break;
594        case EXC_SOFTWARE:
595            break;
596        case EXC_BREAKPOINT:
597            if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
598            {
599                // exc_code = EXC_I386_BPT
600                //
601                nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
602                if (pc != INVALID_NUB_ADDRESS && pc > 0)
603                {
604                    pc -= 1;
605                    // Check for a breakpoint at one byte prior to the current PC value
606                    // since the PC will be just past the trap.
607
608                    nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc);
609                    if (NUB_BREAK_ID_IS_VALID(breakID))
610                    {
611                        // Back up the PC since the trap was taken and the PC
612                        // is at the address following the single byte trap instruction.
613                        if (m_state.context.gpr.__rip > 0)
614                        {
615                            m_state.context.gpr.__rip = pc;
616                            // Write the new PC back out
617                            SetGPRState ();
618                        }
619                    }
620                    return true;
621                }
622            }
623            else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
624            {
625                // exc_code = EXC_I386_SGL
626                //
627                // Check whether this corresponds to a watchpoint hit event.
628                // If yes, set the exc_sub_code to the data break address.
629                nub_addr_t addr = 0;
630                uint32_t hw_index = GetHardwareWatchpointHit(addr);
631                if (hw_index != INVALID_NUB_HW_INDEX)
632                {
633                    exc.exc_data[1] = addr;
634                    // Piggyback the hw_index in the exc.data.
635                    exc.exc_data.push_back(hw_index);
636                }
637
638                return true;
639            }
640            break;
641        case EXC_SYSCALL:
642            break;
643        case EXC_MACH_SYSCALL:
644            break;
645        case EXC_RPC_ALERT:
646            break;
647    }
648    return false;
649}
650
651uint32_t
652DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
653{
654    // Available debug address registers: dr0, dr1, dr2, dr3.
655    return 4;
656}
657
658static uint32_t
659size_and_rw_bits(nub_size_t size, bool read, bool write)
660{
661    uint32_t rw;
662    if (read) {
663        rw = 0x3; // READ or READ/WRITE
664    } else if (write) {
665        rw = 0x1; // WRITE
666    } else {
667        assert(0 && "read and write cannot both be false");
668    }
669
670    switch (size) {
671    case 1:
672        return rw;
673    case 2:
674        return (0x1 << 2) | rw;
675    case 4:
676        return (0x3 << 2) | rw;
677    case 8:
678        return (0x2 << 2) | rw;
679    default:
680        assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
681    }
682}
683void
684DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
685{
686    // Set both dr7 (debug control register) and dri (debug address register).
687
688    // dr7{7-0} encodes the local/global enable bits:
689    //  global enable --. .-- local enable
690    //                  | |
691    //                  v v
692    //      dr0 -> bits{1-0}
693    //      dr1 -> bits{3-2}
694    //      dr2 -> bits{5-4}
695    //      dr3 -> bits{7-6}
696    //
697    // dr7{31-16} encodes the rw/len bits:
698    //  b_x+3, b_x+2, b_x+1, b_x
699    //      where bits{x+1, x} => rw
700    //            0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
701    //      and bits{x+3, x+2} => len
702    //            0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
703    //
704    //      dr0 -> bits{19-16}
705    //      dr1 -> bits{23-20}
706    //      dr2 -> bits{27-24}
707    //      dr3 -> bits{31-28}
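    // For example (illustrative values only): a 4-byte, write-only watchpoint
    // in slot 1 yields size_and_rw_bits(4, false, true) == 0b1101, so dr7 gets
    // the local enable bit (1 << 2) plus that nibble shifted into bits 23-20.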
708    debug_state.__dr7 |= (1 << (2*hw_index) |
709                          size_and_rw_bits(size, read, write) << (16+4*hw_index));
710    switch (hw_index) {
711    case 0:
712        debug_state.__dr0 = addr; break;
713    case 1:
714        debug_state.__dr1 = addr; break;
715    case 2:
716        debug_state.__dr2 = addr; break;
717    case 3:
718        debug_state.__dr3 = addr; break;
719    default:
720        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
721    }
722    return;
723}
724
725void
726DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
727{
728    debug_state.__dr7 &= ~(3 << (2*hw_index));
729    switch (hw_index) {
730    case 0:
731        debug_state.__dr0 = 0; break;
732    case 1:
733        debug_state.__dr1 = 0; break;
734    case 2:
735        debug_state.__dr2 = 0; break;
736    case 3:
737        debug_state.__dr3 = 0; break;
738    default:
739        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
740    }
741    return;
742}
743
744bool
745DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
746{
747    // Check dr7 (debug control register) for local/global enable bits:
748    //  global enable --. .-- local enable
749    //                  | |
750    //                  v v
751    //      dr0 -> bits{1-0}
752    //      dr1 -> bits{3-2}
753    //      dr2 -> bits{5-4}
754    //      dr3 -> bits{7-6}
755    return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
756}
757
758// Resets the local copy of the debug status register to wait for the next debug exception.
759void
760DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state)
761{
762    // See also IsWatchpointHit().
763    debug_state.__dr6 = 0;
764    return;
765}
766
767bool
768DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
769{
770    // Check dr6 (debug status register) to see whether a watchpoint was hit:
771    //          is watchpoint hit?
772    //                  |
773    //                  v
774    //      dr0 -> bits{0}
775    //      dr1 -> bits{1}
776    //      dr2 -> bits{2}
777    //      dr3 -> bits{3}
778    return (debug_state.__dr6 & (1 << hw_index));
779}
780
781nub_addr_t
782DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
783{
784    switch (hw_index) {
785    case 0:
786        return debug_state.__dr0;
787    case 1:
788        return debug_state.__dr1;
789    case 2:
790        return debug_state.__dr2;
791    case 3:
792        return debug_state.__dr3;
793    default:
794        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
795    }
796}
797
798bool
799DNBArchImplX86_64::StartTransForHWP()
800{
801    if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back)
802        DNBLogError ("%s inconsistent state detected, expected %d or %d, got: %d", __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state);
803    m_2pc_dbg_checkpoint = m_state.context.dbg;
804    m_2pc_trans_state = Trans_Pending;
805    return true;
806}
807bool
808DNBArchImplX86_64::RollbackTransForHWP()
809{
810    m_state.context.dbg = m_2pc_dbg_checkpoint;
811    if (m_2pc_trans_state != Trans_Pending)
812        DNBLogError ("%s inconsistent state detected, expected %d, got: %d", __FUNCTION__, Trans_Pending, m_2pc_trans_state);
813    m_2pc_trans_state = Trans_Rolled_Back;
814    kern_return_t kret = SetDBGState();
815    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret);
816
817    if (kret == KERN_SUCCESS)
818        return true;
819    else
820        return false;
821}
822bool
823DNBArchImplX86_64::FinishTransForHWP()
824{
825    m_2pc_trans_state = Trans_Done;
826    return true;
827}
828DNBArchImplX86_64::DBG
829DNBArchImplX86_64::GetDBGCheckpoint()
830{
831    return m_2pc_dbg_checkpoint;
832}
833
834uint32_t
835DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write)
836{
837    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = 0x%llx, size = %zu, read = %u, write = %u)", (uint64_t)addr, size, read, write);
838
839    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
840
841    // Can only watch 1, 2, 4, or 8 bytes.
842    if (!(size == 1 || size == 2 || size == 4 || size == 8))
843        return INVALID_NUB_HW_INDEX;
844
845    // We must watch for either read or write
846    if (read == false && write == false)
847        return INVALID_NUB_HW_INDEX;
848
849    // Read the debug state
850    kern_return_t kret = GetDBGState(false);
851
852    if (kret == KERN_SUCCESS)
853    {
854        // Check to make sure we have the needed hardware support
855        uint32_t i = 0;
856
857        DBG &debug_state = m_state.context.dbg;
858        for (i = 0; i < num_hw_watchpoints; ++i)
859        {
860            if (IsWatchpointVacant(debug_state, i))
861                break;
862        }
863
864        // See if we found an available hw breakpoint slot above
865        if (i < num_hw_watchpoints)
866        {
867            StartTransForHWP();
868
869            // Modify our local copy of the debug state, first.
870            SetWatchpoint(debug_state, i, addr, size, read, write);
871            // Now set the watch point in the inferior.
872            kret = SetDBGState();
873            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
874
875            if (kret == KERN_SUCCESS)
876                return i;
877            else // Revert to the previous debug state voluntarily.  The transaction coordinator knows that we have failed.
878                m_state.context.dbg = GetDBGCheckpoint();
879        }
880        else
881        {
882            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
883        }
884    }
885    return INVALID_NUB_HW_INDEX;
886}
887
888bool
889DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index)
890{
891    kern_return_t kret = GetDBGState(false);
892
893    const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
894    if (kret == KERN_SUCCESS)
895    {
896        DBG &debug_state = m_state.context.dbg;
897        if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
898        {
899            StartTransForHWP();
900
901            // Modify our local copy of the debug state, first.
902            ClearWatchpoint(debug_state, hw_index);
903            // Now disable the watch point in the inferior.
904            kret = SetDBGState();
905            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
906                             hw_index);
907
908            if (kret == KERN_SUCCESS)
909                return true;
910            else // Revert to the previous debug state voluntarily.  The transaction coordinator knows that we have failed.
911                m_state.context.dbg = GetDBGCheckpoint();
912        }
913    }
914    return false;
915}
916
917DNBArchImplX86_64::DBG DNBArchImplX86_64::Global_Debug_State = {0,0,0,0,0,0,0,0};
918bool DNBArchImplX86_64::Valid_Global_Debug_State = false;
919
920// Use this callback from MachThread, which in turn was called from MachThreadList, to update
921// the global view of the hardware watchpoint state, so that when a new thread comes along, it
922// inherits the existing hardware watchpoint state.
923void
924DNBArchImplX86_64::HardwareWatchpointStateChanged ()
925{
926    Global_Debug_State = m_state.context.dbg;
927    Valid_Global_Debug_State = true;
928}
929
930// Iterate through the debug status register; return the index of the first hit.
931uint32_t
932DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr)
933{
934    // Read the debug state
935    kern_return_t kret = GetDBGState(true);
936    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
937    if (kret == KERN_SUCCESS)
938    {
939        DBG &debug_state = m_state.context.dbg;
940        uint32_t i, num = NumSupportedHardwareWatchpoints();
941        for (i = 0; i < num; ++i)
942        {
943            if (IsWatchpointHit(debug_state, i))
944            {
945                addr = GetWatchAddress(debug_state, i);
946                DNBLogThreadedIf(LOG_WATCHPOINTS,
947                                 "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).",
948                                 i,
949                                 (uint64_t)addr);
950                return i;
951            }
952        }
953    }
954    return INVALID_NUB_HW_INDEX;
955}
956
957// Set the single step bit in the processor status register.
958kern_return_t
959DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
960{
961    if (GetGPRState(false) == KERN_SUCCESS)
962    {
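        // 0x100 is the TF (trap flag) bit in the rflags register; while it is
        // set, the CPU raises a single-step exception after each instruction.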
963        const uint32_t trace_bit = 0x100u;
964        if (enable)
965            m_state.context.gpr.__rflags |= trace_bit;
966        else
967            m_state.context.gpr.__rflags &= ~trace_bit;
968        return SetGPRState();
969    }
970    return m_state.GetError(e_regSetGPR, Read);
971}
972
973
974//----------------------------------------------------------------------
975// Register information definitions
976//----------------------------------------------------------------------
977
978enum
979{
980    gpr_rax = 0,
981    gpr_rbx,
982    gpr_rcx,
983    gpr_rdx,
984    gpr_rdi,
985    gpr_rsi,
986    gpr_rbp,
987    gpr_rsp,
988    gpr_r8,
989    gpr_r9,
990    gpr_r10,
991    gpr_r11,
992    gpr_r12,
993    gpr_r13,
994    gpr_r14,
995    gpr_r15,
996    gpr_rip,
997    gpr_rflags,
998    gpr_cs,
999    gpr_fs,
1000    gpr_gs,
1001    k_num_gpr_regs
1002};
1003
1004enum {
1005    fpu_fcw,
1006    fpu_fsw,
1007    fpu_ftw,
1008    fpu_fop,
1009    fpu_ip,
1010    fpu_cs,
1011    fpu_dp,
1012    fpu_ds,
1013    fpu_mxcsr,
1014    fpu_mxcsrmask,
1015    fpu_stmm0,
1016    fpu_stmm1,
1017    fpu_stmm2,
1018    fpu_stmm3,
1019    fpu_stmm4,
1020    fpu_stmm5,
1021    fpu_stmm6,
1022    fpu_stmm7,
1023    fpu_xmm0,
1024    fpu_xmm1,
1025    fpu_xmm2,
1026    fpu_xmm3,
1027    fpu_xmm4,
1028    fpu_xmm5,
1029    fpu_xmm6,
1030    fpu_xmm7,
1031    fpu_xmm8,
1032    fpu_xmm9,
1033    fpu_xmm10,
1034    fpu_xmm11,
1035    fpu_xmm12,
1036    fpu_xmm13,
1037    fpu_xmm14,
1038    fpu_xmm15,
1039    fpu_ymm0,
1040    fpu_ymm1,
1041    fpu_ymm2,
1042    fpu_ymm3,
1043    fpu_ymm4,
1044    fpu_ymm5,
1045    fpu_ymm6,
1046    fpu_ymm7,
1047    fpu_ymm8,
1048    fpu_ymm9,
1049    fpu_ymm10,
1050    fpu_ymm11,
1051    fpu_ymm12,
1052    fpu_ymm13,
1053    fpu_ymm14,
1054    fpu_ymm15,
1055    k_num_fpu_regs,
1056
1057    // Aliases
1058    fpu_fctrl = fpu_fcw,
1059    fpu_fstat = fpu_fsw,
1060    fpu_ftag  = fpu_ftw,
1061    fpu_fiseg = fpu_cs,
1062    fpu_fioff = fpu_ip,
1063    fpu_foseg = fpu_ds,
1064    fpu_fooff = fpu_dp
1065};
1066
1067enum {
1068    exc_trapno,
1069    exc_err,
1070    exc_faultvaddr,
1071    k_num_exc_regs,
1072};
1073
1074
1075enum gcc_dwarf_regnums
1076{
1077    gcc_dwarf_rax = 0,
1078    gcc_dwarf_rdx = 1,
1079    gcc_dwarf_rcx = 2,
1080    gcc_dwarf_rbx = 3,
1081    gcc_dwarf_rsi = 4,
1082    gcc_dwarf_rdi = 5,
1083    gcc_dwarf_rbp = 6,
1084    gcc_dwarf_rsp = 7,
1085    gcc_dwarf_r8,
1086    gcc_dwarf_r9,
1087    gcc_dwarf_r10,
1088    gcc_dwarf_r11,
1089    gcc_dwarf_r12,
1090    gcc_dwarf_r13,
1091    gcc_dwarf_r14,
1092    gcc_dwarf_r15,
1093    gcc_dwarf_rip,
1094    gcc_dwarf_xmm0,
1095    gcc_dwarf_xmm1,
1096    gcc_dwarf_xmm2,
1097    gcc_dwarf_xmm3,
1098    gcc_dwarf_xmm4,
1099    gcc_dwarf_xmm5,
1100    gcc_dwarf_xmm6,
1101    gcc_dwarf_xmm7,
1102    gcc_dwarf_xmm8,
1103    gcc_dwarf_xmm9,
1104    gcc_dwarf_xmm10,
1105    gcc_dwarf_xmm11,
1106    gcc_dwarf_xmm12,
1107    gcc_dwarf_xmm13,
1108    gcc_dwarf_xmm14,
1109    gcc_dwarf_xmm15,
1110    gcc_dwarf_stmm0,
1111    gcc_dwarf_stmm1,
1112    gcc_dwarf_stmm2,
1113    gcc_dwarf_stmm3,
1114    gcc_dwarf_stmm4,
1115    gcc_dwarf_stmm5,
1116    gcc_dwarf_stmm6,
1117    gcc_dwarf_stmm7,
1118    gcc_dwarf_ymm0 = gcc_dwarf_xmm0,
1119    gcc_dwarf_ymm1 = gcc_dwarf_xmm1,
1120    gcc_dwarf_ymm2 = gcc_dwarf_xmm2,
1121    gcc_dwarf_ymm3 = gcc_dwarf_xmm3,
1122    gcc_dwarf_ymm4 = gcc_dwarf_xmm4,
1123    gcc_dwarf_ymm5 = gcc_dwarf_xmm5,
1124    gcc_dwarf_ymm6 = gcc_dwarf_xmm6,
1125    gcc_dwarf_ymm7 = gcc_dwarf_xmm7,
1126    gcc_dwarf_ymm8 = gcc_dwarf_xmm8,
1127    gcc_dwarf_ymm9 = gcc_dwarf_xmm9,
1128    gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
1129    gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
1130    gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
1131    gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
1132    gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
1133    gcc_dwarf_ymm15 = gcc_dwarf_xmm15
1134};
1135
1136enum gdb_regnums
1137{
1138    gdb_rax     =   0,
1139    gdb_rbx     =   1,
1140    gdb_rcx     =   2,
1141    gdb_rdx     =   3,
1142    gdb_rsi     =   4,
1143    gdb_rdi     =   5,
1144    gdb_rbp     =   6,
1145    gdb_rsp     =   7,
1146    gdb_r8      =   8,
1147    gdb_r9      =   9,
1148    gdb_r10     =  10,
1149    gdb_r11     =  11,
1150    gdb_r12     =  12,
1151    gdb_r13     =  13,
1152    gdb_r14     =  14,
1153    gdb_r15     =  15,
1154    gdb_rip     =  16,
1155    gdb_rflags  =  17,
1156    gdb_cs      =  18,
1157    gdb_ss      =  19,
1158    gdb_ds      =  20,
1159    gdb_es      =  21,
1160    gdb_fs      =  22,
1161    gdb_gs      =  23,
1162    gdb_stmm0   =  24,
1163    gdb_stmm1   =  25,
1164    gdb_stmm2   =  26,
1165    gdb_stmm3   =  27,
1166    gdb_stmm4   =  28,
1167    gdb_stmm5   =  29,
1168    gdb_stmm6   =  30,
1169    gdb_stmm7   =  31,
1170    gdb_fctrl   =  32,  gdb_fcw = gdb_fctrl,
1171    gdb_fstat   =  33,  gdb_fsw = gdb_fstat,
1172    gdb_ftag    =  34,  gdb_ftw = gdb_ftag,
1173    gdb_fiseg   =  35,  gdb_fpu_cs  = gdb_fiseg,
1174    gdb_fioff   =  36,  gdb_ip  = gdb_fioff,
1175    gdb_foseg   =  37,  gdb_fpu_ds  = gdb_foseg,
1176    gdb_fooff   =  38,  gdb_dp  = gdb_fooff,
1177    gdb_fop     =  39,
1178    gdb_xmm0    =  40,
1179    gdb_xmm1    =  41,
1180    gdb_xmm2    =  42,
1181    gdb_xmm3    =  43,
1182    gdb_xmm4    =  44,
1183    gdb_xmm5    =  45,
1184    gdb_xmm6    =  46,
1185    gdb_xmm7    =  47,
1186    gdb_xmm8    =  48,
1187    gdb_xmm9    =  49,
1188    gdb_xmm10   =  50,
1189    gdb_xmm11   =  51,
1190    gdb_xmm12   =  52,
1191    gdb_xmm13   =  53,
1192    gdb_xmm14   =  54,
1193    gdb_xmm15   =  55,
1194    gdb_mxcsr   =  56,
1195    gdb_ymm0    =  gdb_xmm0,
1196    gdb_ymm1    =  gdb_xmm1,
1197    gdb_ymm2    =  gdb_xmm2,
1198    gdb_ymm3    =  gdb_xmm3,
1199    gdb_ymm4    =  gdb_xmm4,
1200    gdb_ymm5    =  gdb_xmm5,
1201    gdb_ymm6    =  gdb_xmm6,
1202    gdb_ymm7    =  gdb_xmm7,
1203    gdb_ymm8    =  gdb_xmm8,
1204    gdb_ymm9    =  gdb_xmm9,
1205    gdb_ymm10   =  gdb_xmm10,
1206    gdb_ymm11   =  gdb_xmm11,
1207    gdb_ymm12   =  gdb_xmm12,
1208    gdb_ymm13   =  gdb_xmm13,
1209    gdb_ymm14   =  gdb_xmm14,
1210    gdb_ymm15   =  gdb_xmm15
1211};
1212
1213#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
1214#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
1215#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
1216#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg)     + offsetof (DNBArchImplX86_64::Context, exc))
1217
1218// This does not accurately identify the location of ymm0...7 in
1219// Context.fpu.avx, because Context.fpu.avx contains a bunch of padding
1220// that we don't need.  These offset macros lay out the register state
1221// that debugserver transmits to the debugger -- they are not meant for
1222// interpreting the thread_get_state info.
1223#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * (n)))
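// In other words, as computed here, ymm<n> is transmitted as a 32-byte slot
// beginning immediately after xmm7's 16 bytes in the register data that
// debugserver sends, not at its real offset in the AVX thread state.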
1224
1225#define GPR_SIZE(reg)       (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
1226#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
1227#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
1228#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
1229#define FPU_SIZE_YMM(reg)   (32)
1230#define EXC_SIZE(reg)       (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1231
1232// These macros will auto define the register name, alt name, register size,
1233// register offset, encoding, format and native register. This ensures that
1234// the register state structures are defined correctly and have the correct
1235// sizes and offsets.
1236#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg }
1237#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg }
1238#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg }
1239#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg }
1240
1241// General purpose registers for 64 bit
1242const DNBRegisterInfo
1243DNBArchImplX86_64::g_gpr_registers[] =
1244{
1245    DEFINE_GPR      (rax),
1246    DEFINE_GPR      (rbx),
1247    DEFINE_GPR_ALT  (rcx , "arg4", GENERIC_REGNUM_ARG4),
1248    DEFINE_GPR_ALT  (rdx , "arg3", GENERIC_REGNUM_ARG3),
1249    DEFINE_GPR_ALT  (rdi , "arg1", GENERIC_REGNUM_ARG1),
1250    DEFINE_GPR_ALT  (rsi , "arg2", GENERIC_REGNUM_ARG2),
1251    DEFINE_GPR_ALT  (rbp , "fp"  , GENERIC_REGNUM_FP),
1252    DEFINE_GPR_ALT  (rsp , "sp"  , GENERIC_REGNUM_SP),
1253    DEFINE_GPR_ALT  (r8  , "arg5", GENERIC_REGNUM_ARG5),
1254    DEFINE_GPR_ALT  (r9  , "arg6", GENERIC_REGNUM_ARG6),
1255    DEFINE_GPR      (r10),
1256    DEFINE_GPR      (r11),
1257    DEFINE_GPR      (r12),
1258    DEFINE_GPR      (r13),
1259    DEFINE_GPR      (r14),
1260    DEFINE_GPR      (r15),
1261    DEFINE_GPR_ALT  (rip , "pc", GENERIC_REGNUM_PC),
1262    DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
1263    DEFINE_GPR_ALT2 (cs,        NULL),
1264    DEFINE_GPR_ALT2 (fs,        NULL),
1265    DEFINE_GPR_ALT2 (gs,        NULL),
1266};
1267
1268// Floating point registers 64 bit
1269const DNBRegisterInfo
1270DNBArchImplX86_64::g_fpu_registers_no_avx[] =
1271{
1272    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1U, -1U, -1U, -1U },
1273    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1U, -1U, -1U, -1U },
1274    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1U, -1U, -1U, -1U },
1275    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1U, -1U, -1U, -1U },
1276    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1U, -1U, -1U, -1U },
1277    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1U, -1U, -1U, -1U },
1278    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1U, -1U, -1U, -1U },
1279    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1U, -1U, -1U, -1U },
1280    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1U, -1U, -1U, -1U },
1281    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U },
1282
1283    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0 },
1284    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1 },
1285    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2 },
1286    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3 },
1287    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4 },
1288    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5 },
1289    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6 },
1290    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7 },
1291
1292    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 },
1293    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 },
1294    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 },
1295    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 },
1296    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 },
1297    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 },
1298    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 },
1299    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 },
1300    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8  },
1301    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9  },
1302    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10 },
1303    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11 },
1304    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12 },
1305    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13 },
1306    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14 },
1307    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15 },
1308};
1309
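// The AVX flavor of the FPU register table mirrors g_fpu_registers_no_avx
// above, but computes every offset with AVX_OFFSET() (presumably against the
// x86_avx_state64 layout rather than x86_float_state64) and appends the
// sixteen 256-bit ymm registers at the end.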
1310const DNBRegisterInfo
1311DNBArchImplX86_64::g_fpu_registers_avx[] =
1312{
1313    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1U, -1U, -1U, -1U },
1314    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1U, -1U, -1U, -1U },
1315    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1U, -1U, -1U, -1U },
1316    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1U, -1U, -1U, -1U },
1317    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1U, -1U, -1U, -1U },
1318    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1U, -1U, -1U, -1U },
1319    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1U, -1U, -1U, -1U },
1320    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1U, -1U, -1U, -1U },
1321    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1U, -1U, -1U, -1U },
1322    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U },
1323
1324    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0 },
1325    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1 },
1326    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2 },
1327    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3 },
1328    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4 },
1329    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5 },
1330    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6 },
1331    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7 },
1332
1333    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 },
1334    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 },
1335    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 },
1336    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 },
1337    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 },
1338    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 },
1339    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 },
1340    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 },
1341    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8  },
1342    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9  },
1343    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10 },
1344    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11 },
1345    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12 },
1346    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13 },
1347    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14 },
1348    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15 },
1349
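    // A 256-bit ymm register has no single contiguous slot in the AVX thread
    // state; its low half lives in the xmm area and its high half in the ymmh
    // area. AVX_OFFSET_YMM(n) presumably yields a synthetic offset for register
    // n, and GetRegisterValue()/SetRegisterValue() below stitch the two halves
    // together when a ymm register is read or written.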
1350    { e_regSetFPU, fpu_ymm0 , "ymm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)   , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1U, gdb_ymm0 },
1351    { e_regSetFPU, fpu_ymm1 , "ymm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)   , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1U, gdb_ymm1 },
1352    { e_regSetFPU, fpu_ymm2 , "ymm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)   , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1U, gdb_ymm2 },
1353    { e_regSetFPU, fpu_ymm3 , "ymm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)   , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1U, gdb_ymm3 },
1354    { e_regSetFPU, fpu_ymm4 , "ymm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)   , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1U, gdb_ymm4 },
1355    { e_regSetFPU, fpu_ymm5 , "ymm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)   , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1U, gdb_ymm5 },
1356    { e_regSetFPU, fpu_ymm6 , "ymm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)   , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1U, gdb_ymm6 },
1357    { e_regSetFPU, fpu_ymm7 , "ymm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)   , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1U, gdb_ymm7 },
1358    { e_regSetFPU, fpu_ymm8 , "ymm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)   , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1U, gdb_ymm8  },
1359    { e_regSetFPU, fpu_ymm9 , "ymm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)   , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1U, gdb_ymm9  },
1360    { e_regSetFPU, fpu_ymm10, "ymm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10)  , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1U, gdb_ymm10 },
1361    { e_regSetFPU, fpu_ymm11, "ymm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11)  , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1U, gdb_ymm11 },
1362    { e_regSetFPU, fpu_ymm12, "ymm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12)  , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1U, gdb_ymm12 },
1363    { e_regSetFPU, fpu_ymm13, "ymm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13)  , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1U, gdb_ymm13 },
1364    { e_regSetFPU, fpu_ymm14, "ymm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14)  , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1U, gdb_ymm14 },
1365    { e_regSetFPU, fpu_ymm15, "ymm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15)  , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1U, gdb_ymm15 }
1366};
1367
1368// Exception registers
1369
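// The exception-state set exposes the Mach exception information for the
// thread: the trap number, the error code, and the faulting virtual address.
// These registers have no DWARF or GDB numbers, hence the -1U entries in the
// numbering columns.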
1370const DNBRegisterInfo
1371DNBArchImplX86_64::g_exc_registers[] =
1372{
1373    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)       , -1U, -1U, -1U, -1U },
1374    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)          , -1U, -1U, -1U, -1U },
1375    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr)   , -1U, -1U, -1U, -1U }
1376};
1377
1378// Number of registers in each register set
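// Each count is derived with the sizeof(array)/sizeof(element) idiom so it
// stays in sync with the corresponding table above if entries are added or
// removed.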
1379const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
1380const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
1381const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
1382const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
1383const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
1384const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
1385
1386//----------------------------------------------------------------------
1387// Register set definitions. The first definition, at register set index
1388// zero, is for all registers and is followed by the other register sets.
1389// The register information for the all-registers set need not be filled in.
1390//----------------------------------------------------------------------
1391const DNBRegisterSetInfo
1392DNBArchImplX86_64::g_reg_sets_no_avx[] =
1393{
1394    { "x86_64 Registers",           NULL,               k_num_all_registers_no_avx },
1395    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1396    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
1397    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1398};
1399
1400const DNBRegisterSetInfo
1401DNBArchImplX86_64::g_reg_sets_avx[] =
1402{
1403    { "x86_64 Registers",           NULL,               k_num_all_registers_avx },
1404    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1405    { "Floating Point Registers",   g_fpu_registers_avx, k_num_fpu_registers_avx },
1406    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1407};
1408
1409// Total number of register sets for this architecture
1410const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);
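// g_reg_sets_avx and g_reg_sets_no_avx have the same number of entries, so
// sizing from either table gives the same count.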
1411
1412
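// Factory callback handed to DNBArchProtocol: builds the per-thread
// architecture implementation for an x86_64 MachThread.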
1413DNBArchProtocol *
1414DNBArchImplX86_64::Create (MachThread *thread)
1415{
1416    DNBArchImplX86_64 *obj = new DNBArchImplX86_64 (thread);
1417
1418    // When a new thread comes along, it inherits the global debug state, if that state is valid.
1419    if (Valid_Global_Debug_State)
1420    {
1421        obj->m_state.context.dbg = Global_Debug_State;
1422        kern_return_t kret = obj->SetDBGState();
1423        DNBLogThreadedIf(LOG_WATCHPOINTS,
1424                         "DNBArchImplX86_64::Create() Inherit and SetDBGState() => 0x%8.8x.", kret);
1425    }
1426    return obj;
1427}
1428
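// The software breakpoint opcode for x86_64 is the single-byte INT3
// instruction (0xCC); only a byte size of 1 is supported.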
1429const uint8_t * const
1430DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
1431{
1432    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
1433    if (byte_size == 1)
1434        return g_breakpoint_opcode;
1435    return NULL;
1436}
1437
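// Hand back the register set table that matches this machine: the AVX flavor
// when the CPU reports AVX support (or the force-AVX debugging override is in
// effect), otherwise the plain FPU flavor.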
1438const DNBRegisterSetInfo *
1439DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
1440{
1441    *num_reg_sets = k_num_register_sets;
1442
1443    if (CPUHasAVX() || FORCE_AVX_REGS)
1444        return g_reg_sets_avx;
1445    else
1446        return g_reg_sets_no_avx;
1447}
1448
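// Register this architecture plug-in (its create, register-set-info, and
// breakpoint-opcode callbacks) with DNBArchProtocol so the generic layer can
// instantiate it for CPU_TYPE_X86_64.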
1449void
1450DNBArchImplX86_64::Initialize()
1451{
1452    DNBArchPluginInfo arch_plugin_info =
1453    {
1454        CPU_TYPE_X86_64,
1455        DNBArchImplX86_64::Create,
1456        DNBArchImplX86_64::GetRegisterSetInfo,
1457        DNBArchImplX86_64::SoftwareBreakpointOpcode
1458    };
1459
1460    // Register this arch plug-in with the main protocol class
1461    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
1462}
1463
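// Read a single register out of the cached thread state. Generic register
// numbers (PC, SP, FP, FLAGS) are first mapped onto their x86_64 GPR
// equivalents; the register set is then fetched via GetRegisterState(set,
// false), which presumably reuses the cached copy when it is still valid,
// before the value is copied out.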
1464bool
1465DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
1466{
1467    if (set == REGISTER_SET_GENERIC)
1468    {
1469        switch (reg)
1470        {
1471            case GENERIC_REGNUM_PC:     // Program Counter
1472                set = e_regSetGPR;
1473                reg = gpr_rip;
1474                break;
1475
1476            case GENERIC_REGNUM_SP:     // Stack Pointer
1477                set = e_regSetGPR;
1478                reg = gpr_rsp;
1479                break;
1480
1481            case GENERIC_REGNUM_FP:     // Frame Pointer
1482                set = e_regSetGPR;
1483                reg = gpr_rbp;
1484                break;
1485
1486            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1487                set = e_regSetGPR;
1488                reg = gpr_rflags;
1489                break;
1490
1491            case GENERIC_REGNUM_RA:     // Return Address
1492            default:
1493                return false;
1494        }
1495    }
1496
1497    if (GetRegisterState(set, false) != KERN_SUCCESS)
1498        return false;
1499
1500    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1501    if (regInfo)
1502    {
1503        value->info = *regInfo;
1504        switch (set)
1505        {
1506            case e_regSetGPR:
1507                if (reg < k_num_gpr_registers)
1508                {
1509                    value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
1510                    return true;
1511                }
1512                break;
1513
1514            case e_regSetFPU:
1515                if (CPUHasAVX() || FORCE_AVX_REGS)
1516                {
1517                    switch (reg)
1518                    {
1519                    case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
1520                    case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
1521                    case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
1522                    case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
1523                    case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
1524                    case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
1525                    case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
1526                    case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
1527                    case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
1528                    case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;
1529
1530                    case fpu_stmm0:
1531                    case fpu_stmm1:
1532                    case fpu_stmm2:
1533                    case fpu_stmm3:
1534                    case fpu_stmm4:
1535                    case fpu_stmm5:
1536                    case fpu_stmm6:
1537                    case fpu_stmm7:
1538                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1539                        return true;
1540
1541                    case fpu_xmm0:
1542                    case fpu_xmm1:
1543                    case fpu_xmm2:
1544                    case fpu_xmm3:
1545                    case fpu_xmm4:
1546                    case fpu_xmm5:
1547                    case fpu_xmm6:
1548                    case fpu_xmm7:
1549                    case fpu_xmm8:
1550                    case fpu_xmm9:
1551                    case fpu_xmm10:
1552                    case fpu_xmm11:
1553                    case fpu_xmm12:
1554                    case fpu_xmm13:
1555                    case fpu_xmm14:
1556                    case fpu_xmm15:
1557                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1558                        return true;
1559
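                    // A ymm read is assembled from two places in the AVX state:
                    // the low 16 bytes come from the matching __fpu_xmm slot and
                    // the high 16 bytes from the matching __fpu_ymmh slot.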
1560                    case fpu_ymm0:
1561                    case fpu_ymm1:
1562                    case fpu_ymm2:
1563                    case fpu_ymm3:
1564                    case fpu_ymm4:
1565                    case fpu_ymm5:
1566                    case fpu_ymm6:
1567                    case fpu_ymm7:
1568                    case fpu_ymm8:
1569                    case fpu_ymm9:
1570                    case fpu_ymm10:
1571                    case fpu_ymm11:
1572                    case fpu_ymm12:
1573                    case fpu_ymm13:
1574                    case fpu_ymm14:
1575                    case fpu_ymm15:
1576                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
1577                        memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
1578                        return true;
1579                    }
1580                }
1581                else
1582                {
1583                    switch (reg)
1584                    {
1585                        case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
1586                        case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
1587                        case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
1588                        case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
1589                        case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
1590                        case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
1591                        case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
1592                        case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
1593                        case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
1594                        case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;
1595
1596                        case fpu_stmm0:
1597                        case fpu_stmm1:
1598                        case fpu_stmm2:
1599                        case fpu_stmm3:
1600                        case fpu_stmm4:
1601                        case fpu_stmm5:
1602                        case fpu_stmm6:
1603                        case fpu_stmm7:
1604                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1605                            return true;
1606
1607                        case fpu_xmm0:
1608                        case fpu_xmm1:
1609                        case fpu_xmm2:
1610                        case fpu_xmm3:
1611                        case fpu_xmm4:
1612                        case fpu_xmm5:
1613                        case fpu_xmm6:
1614                        case fpu_xmm7:
1615                        case fpu_xmm8:
1616                        case fpu_xmm9:
1617                        case fpu_xmm10:
1618                        case fpu_xmm11:
1619                        case fpu_xmm12:
1620                        case fpu_xmm13:
1621                        case fpu_xmm14:
1622                        case fpu_xmm15:
1623                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1624                            return true;
1625                    }
1626                }
1627                break;
1628
1629            case e_regSetEXC:
1630                switch (reg)
1631                {
1632                case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno; return true;
1633                case exc_err:       value->value.uint32 = m_state.context.exc.__err; return true;
1634                case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
1635                }
1636                break;
1637        }
1638    }
1639    return false;
1640}
1641
1642
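// Write a single register. This mirrors GetRegisterValue(): generic register
// numbers are mapped to GPRs, the value is stored into the cached thread
// state, and on success SetRegisterState() pushes the modified set back to
// the thread.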
1643bool
1644DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
1645{
1646    if (set == REGISTER_SET_GENERIC)
1647    {
1648        switch (reg)
1649        {
1650            case GENERIC_REGNUM_PC:     // Program Counter
1651                set = e_regSetGPR;
1652                reg = gpr_rip;
1653                break;
1654
1655            case GENERIC_REGNUM_SP:     // Stack Pointer
1656                set = e_regSetGPR;
1657                reg = gpr_rsp;
1658                break;
1659
1660            case GENERIC_REGNUM_FP:     // Frame Pointer
1661                set = e_regSetGPR;
1662                reg = gpr_rbp;
1663                break;
1664
1665            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1666                set = e_regSetGPR;
1667                reg = gpr_rflags;
1668                break;
1669
1670            case GENERIC_REGNUM_RA:     // Return Address
1671            default:
1672                return false;
1673        }
1674    }
1675
1676    if (GetRegisterState(set, false) != KERN_SUCCESS)
1677        return false;
1678
1679    bool success = false;
1680    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1681    if (regInfo)
1682    {
1683        switch (set)
1684        {
1685            case e_regSetGPR:
1686                if (reg < k_num_gpr_registers)
1687                {
1688                    ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
1689                    success = true;
1690                }
1691                break;
1692
1693            case e_regSetFPU:
1694                if (CPUHasAVX() || FORCE_AVX_REGS)
1695                {
1696                    switch (reg)
1697                    {
1698                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1699                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1700                    case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1701                    case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
1702                    case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
1703                    case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
1704                    case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
1705                    case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
1706                    case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1707                    case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1708
1709                    case fpu_stmm0:
1710                    case fpu_stmm1:
1711                    case fpu_stmm2:
1712                    case fpu_stmm3:
1713                    case fpu_stmm4:
1714                    case fpu_stmm5:
1715                    case fpu_stmm6:
1716                    case fpu_stmm7:
1717                        memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1718                        success = true;
1719                        break;
1720
1721                    case fpu_xmm0:
1722                    case fpu_xmm1:
1723                    case fpu_xmm2:
1724                    case fpu_xmm3:
1725                    case fpu_xmm4:
1726                    case fpu_xmm5:
1727                    case fpu_xmm6:
1728                    case fpu_xmm7:
1729                    case fpu_xmm8:
1730                    case fpu_xmm9:
1731                    case fpu_xmm10:
1732                    case fpu_xmm11:
1733                    case fpu_xmm12:
1734                    case fpu_xmm13:
1735                    case fpu_xmm14:
1736                    case fpu_xmm15:
1737                        memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1738                        success = true;
1739                        break;
1740
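                    // A ymm write is the mirror image of the read: the low 16
                    // bytes are scattered into the matching __fpu_xmm slot and
                    // the high 16 bytes into the matching __fpu_ymmh slot.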
1741                    case fpu_ymm0:
1742                    case fpu_ymm1:
1743                    case fpu_ymm2:
1744                    case fpu_ymm3:
1745                    case fpu_ymm4:
1746                    case fpu_ymm5:
1747                    case fpu_ymm6:
1748                    case fpu_ymm7:
1749                    case fpu_ymm8:
1750                    case fpu_ymm9:
1751                    case fpu_ymm10:
1752                    case fpu_ymm11:
1753                    case fpu_ymm12:
1754                    case fpu_ymm13:
1755                    case fpu_ymm14:
1756                    case fpu_ymm15:
1757                        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
1758                        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
1759                        success = true; break;
1760                    }
1761                }
1762                else
1763                {
1764                    switch (reg)
1765                    {
1766                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1767                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1768                    case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1769                    case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
1770                    case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
1771                    case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
1772                    case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
1773                    case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
1774                    case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1775                    case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1776
1777                    case fpu_stmm0:
1778                    case fpu_stmm1:
1779                    case fpu_stmm2:
1780                    case fpu_stmm3:
1781                    case fpu_stmm4:
1782                    case fpu_stmm5:
1783                    case fpu_stmm6:
1784                    case fpu_stmm7:
1785                        memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1786                        success = true;
1787                        break;
1788
1789                    case fpu_xmm0:
1790                    case fpu_xmm1:
1791                    case fpu_xmm2:
1792                    case fpu_xmm3:
1793                    case fpu_xmm4:
1794                    case fpu_xmm5:
1795                    case fpu_xmm6:
1796                    case fpu_xmm7:
1797                    case fpu_xmm8:
1798                    case fpu_xmm9:
1799                    case fpu_xmm10:
1800                    case fpu_xmm11:
1801                    case fpu_xmm12:
1802                    case fpu_xmm13:
1803                    case fpu_xmm14:
1804                    case fpu_xmm15:
1805                        memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1806                        success = true;
1807                        break;
1808                    }
1809                }
1810                break;
1811
1812            case e_regSetEXC:
1813                switch (reg)
1814                {
1815                case exc_trapno:    m_state.context.exc.__trapno = value->value.uint32;     success = true; break;
1816                case exc_err:       m_state.context.exc.__err = value->value.uint32;        success = true; break;
1817                case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
1818                }
1819                break;
1820        }
1821    }
1822
1823    if (success)
1824        return SetRegisterState(set) == KERN_SUCCESS;
1825    return false;
1826}
1827
1828
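// Copy the full cached register context out to the caller. All three register
// sets are refreshed first; if any read fails nothing is copied and 0 is
// returned. Passing NULL (or a zero length) skips the copy but still returns
// sizeof(m_state.context), so a caller can size its buffer first, e.g.
// (illustrative sketch, variable names assumed):
//     nub_size_t len = arch->GetRegisterContext (NULL, 0);
//     std::vector<uint8_t> buf (len);
//     arch->GetRegisterContext (buf.data(), buf.size());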
1829nub_size_t
1830DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
1831{
1832    nub_size_t size = sizeof (m_state.context);
1833
1834    if (buf && buf_len)
1835    {
1836        if (size > buf_len)
1837            size = buf_len;
1838
1839        bool force = false;
1840        kern_return_t kret;
1841        if ((kret = GetGPRState(force)) != KERN_SUCCESS)
1842        {
1843            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to read: %u", buf, buf_len, kret);
1844            size = 0;
1845        }
1846        else
1847        if ((kret = GetFPUState(force)) != KERN_SUCCESS)
1848        {
1849            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: %s regs failed to read: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1850            size = 0;
1851        }
1852        else
1853        if ((kret = GetEXCState(force)) != KERN_SUCCESS)
1854        {
1855            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to read: %u", buf, buf_len, kret);
1856            size = 0;
1857        }
1858        else
1859        {
1860            // Success
1861            ::memcpy (buf, &m_state.context, size);
1862        }
1863    }
1864    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1865    // Return the size of the register context even if NULL was passed in
1866    return size;
1867}
1868
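// Write a previously captured register context back to the thread. The bytes
// are copied into the cached context and each register set is pushed to the
// kernel; individual write failures are logged but do not reduce the returned
// size.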
1869nub_size_t
1870DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
1871{
1872    nub_size_t size = sizeof (m_state.context);
1873    if (buf == NULL || buf_len == 0)
1874        size = 0;
1875
1876    if (size)
1877    {
1878        if (size > buf_len)
1879            size = buf_len;
1880
1881        ::memcpy (&m_state.context, buf, size);
1882        kern_return_t kret;
1883        if ((kret = SetGPRState()) != KERN_SUCCESS)
1884            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to write: %u", buf, buf_len, kret);
1885        if ((kret = SetFPUState()) != KERN_SUCCESS)
1886            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: %s regs failed to write: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1887        if ((kret = SetEXCState()) != KERN_SUCCESS)
1888            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to write: %u", buf, buf_len, kret);
1889    }
1890    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1891    return size;
1892}
1893
1894
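// Refresh the requested register set(s) from the thread. Note that
// e_regSetALL combines the three kern_return_t values with bitwise OR, so the
// merged code is only meaningful as KERN_SUCCESS versus "something failed".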
1895kern_return_t
1896DNBArchImplX86_64::GetRegisterState(int set, bool force)
1897{
1898    switch (set)
1899    {
1900        case e_regSetALL:    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
1901        case e_regSetGPR:    return GetGPRState(force);
1902        case e_regSetFPU:    return GetFPUState(force);
1903        case e_regSetEXC:    return GetEXCState(force);
1904        default: break;
1905    }
1906    return KERN_INVALID_ARGUMENT;
1907}
1908
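// Push the requested register set(s) back to the thread, but only when the
// cached copy of that set is valid; e_regSetALL again merges the individual
// kern_return_t codes with bitwise OR.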
1909kern_return_t
1910DNBArchImplX86_64::SetRegisterState(int set)
1911{
1912    // Make sure we have a valid context to set.
1913    if (RegisterSetStateIsValid(set))
1914    {
1915        switch (set)
1916        {
1917            case e_regSetALL:    return SetGPRState() | SetFPUState() | SetEXCState();
1918            case e_regSetGPR:    return SetGPRState();
1919            case e_regSetFPU:    return SetFPUState();
1920            case e_regSetEXC:    return SetEXCState();
1921            default: break;
1922        }
1923    }
1924    return KERN_INVALID_ARGUMENT;
1925}
1926
1927bool
1928DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
1929{
1930    return m_state.RegsAreValid(set);
1931}
1932
1933
1934
1935#endif    // #if defined (__i386__) || defined (__x86_64__)
1936