//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined (__i386__) || defined (__x86_64__)

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/sysctl.h>

#include "MacOSX/x86_64/DNBArchImplX86_64.h"
#include "DNBLog.h"
#include "MachThread.h"
#include "MachProcess.h"
#include <mach/mach.h>
#include <stdlib.h>

#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
enum debugState {
    debugStateUnknown,
    debugStateOff,
    debugStateOn
};

static debugState sFPUDebugState = debugStateUnknown;
static debugState sAVXForceState = debugStateUnknown;

static bool DebugFPURegs ()
{
    if (sFPUDebugState == debugStateUnknown)
    {
        if (getenv("DNB_DEBUG_FPU_REGS"))
            sFPUDebugState = debugStateOn;
        else
            sFPUDebugState = debugStateOff;
    }

    return (sFPUDebugState == debugStateOn);
}

static bool ForceAVXRegs ()
{
    if (sAVXForceState == debugStateUnknown)
    {
        if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
            sAVXForceState = debugStateOn;
        else
            sAVXForceState = debugStateOff;
    }

    return (sAVXForceState == debugStateOn);
}

#define DEBUG_FPU_REGS (DebugFPURegs())
#define FORCE_AVX_REGS (ForceAVXRegs())
#else
#define DEBUG_FPU_REGS (0)
#define FORCE_AVX_REGS (0)
#endif


extern "C" bool
CPUHasAVX()
{
    enum AVXPresence
    {
        eAVXUnknown     = -1,
        eAVXNotPresent  =  0,
        eAVXPresent     =  1
    };

    static AVXPresence g_has_avx = eAVXUnknown;
    if (g_has_avx == eAVXUnknown)
    {
        g_has_avx = eAVXNotPresent;

        // Only xnu-2020 or later has AVX support; any version before that
        // has a busted thread_get_state RPC that truncates the thread state
        // buffer (<rdar://problem/10122874>). So we need to check the kernel
        // version number manually, or else disable AVX support.
        int mib[2];
        char buffer[1024];
        size_t length = sizeof(buffer);
        uint64_t xnu_version = 0;
        mib[0] = CTL_KERN;
        mib[1] = KERN_VERSION;
        int err = ::sysctl(mib, 2, &buffer, &length, NULL, 0);
        if (err == 0)
        {
            const char *xnu = strstr (buffer, "xnu-");
            if (xnu)
            {
                const char *xnu_version_cstr = xnu + 4;
                xnu_version = strtoull (xnu_version_cstr, NULL, 0);
                if (xnu_version >= 2020 && xnu_version != ULLONG_MAX)
                {
                    if (::HasAVX())
                    {
                        g_has_avx = eAVXPresent;
                    }
                }
            }
        }
        DNBLogThreadedIf (LOG_THREAD, "CPUHasAVX(): g_has_avx = %i (err = %i, errno = %i, xnu_version = %llu)\n", g_has_avx, err, errno, xnu_version);
    }

    return (g_has_avx == eAVXPresent);
}
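
// Illustrative sketch (kept out of the build): how the check above pulls the
// major version out of the KERN_VERSION banner. The kernel banner ends in
// something like "root:xnu-2050.7.9~1/RELEASE_X86_64", so locating "xnu-" and
// calling strtoull on the text after it yields the major version (2050 in
// that made-up example), which is then compared against 2020. The helper
// name below is hypothetical and is not part of debugserver.
#if 0
#include <cstdint>
#include <cstdlib>
#include <cstring>

static uint64_t
ParseXNUMajorVersion (const char *kern_version_banner)
{
    // Find the "xnu-" marker and parse the number that follows it.
    const char *xnu = strstr (kern_version_banner, "xnu-");
    if (xnu == NULL)
        return 0;                       // no xnu version string present
    return strtoull (xnu + 4, NULL, 0); // "2050.7.9~1/..." parses as 2050
}
#endif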

uint64_t
DNBArchImplX86_64::GetPC(uint64_t failValue)
{
    // Get program counter
    if (GetGPRState(false) == KERN_SUCCESS)
        return m_state.context.gpr.__rip;
    return failValue;
}

kern_return_t
DNBArchImplX86_64::SetPC(uint64_t value)
{
    // Set the program counter
    kern_return_t err = GetGPRState(false);
    if (err == KERN_SUCCESS)
    {
        m_state.context.gpr.__rip = value;
        err = SetGPRState();
    }
    return err == KERN_SUCCESS;
}

uint64_t
DNBArchImplX86_64::GetSP(uint64_t failValue)
{
    // Get stack pointer
    if (GetGPRState(false) == KERN_SUCCESS)
        return m_state.context.gpr.__rsp;
    return failValue;
}

// Uncomment the value below to verify the values in the debugger.
//#define DEBUG_GPR_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED

kern_return_t
DNBArchImplX86_64::GetGPRState(bool force)
{
    if (force || m_state.GetError(e_regSetGPR, Read))
    {
        kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
        DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());

#if DEBUG_GPR_VALUES
        m_state.context.gpr.__rax = ('a' << 8) + 'x';
        m_state.context.gpr.__rbx = ('b' << 8) + 'x';
        m_state.context.gpr.__rcx = ('c' << 8) + 'x';
        m_state.context.gpr.__rdx = ('d' << 8) + 'x';
        m_state.context.gpr.__rdi = ('d' << 8) + 'i';
        m_state.context.gpr.__rsi = ('s' << 8) + 'i';
        m_state.context.gpr.__rbp = ('b' << 8) + 'p';
        m_state.context.gpr.__rsp = ('s' << 8) + 'p';
        m_state.context.gpr.__r8  = ('r' << 8) + '8';
        m_state.context.gpr.__r9  = ('r' << 8) + '9';
        m_state.context.gpr.__r10 = ('r' << 8) + 'a';
        m_state.context.gpr.__r11 = ('r' << 8) + 'b';
        m_state.context.gpr.__r12 = ('r' << 8) + 'c';
        m_state.context.gpr.__r13 = ('r' << 8) + 'd';
        m_state.context.gpr.__r14 = ('r' << 8) + 'e';
        m_state.context.gpr.__r15 = ('r' << 8) + 'f';
        m_state.context.gpr.__rip = ('i' << 8) + 'p';
        m_state.context.gpr.__rflags = ('f' << 8) + 'l';
        m_state.context.gpr.__cs = ('c' << 8) + 's';
        m_state.context.gpr.__fs = ('f' << 8) + 's';
        m_state.context.gpr.__gs = ('g' << 8) + 's';
        m_state.SetError(e_regSetGPR, Read, 0);
#else
        mach_msg_type_number_t count = e_regSetWordSizeGPR;
        m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
        DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
                          "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
                          "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
                          "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
                          "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
                          "\n\trip = %16.16llx"
                          "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
                          m_thread->ThreadID(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT,
                          m_state.GetError(e_regSetGPR, Read),
                          m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
                          m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
                          m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
                          m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
                          m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
                          m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
                          m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);

        //      DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
        //                        "\n\trax = %16.16llx"
        //                        "\n\trbx = %16.16llx"
        //                        "\n\trcx = %16.16llx"
        //                        "\n\trdx = %16.16llx"
        //                        "\n\trdi = %16.16llx"
        //                        "\n\trsi = %16.16llx"
        //                        "\n\trbp = %16.16llx"
        //                        "\n\trsp = %16.16llx"
        //                        "\n\t r8 = %16.16llx"
        //                        "\n\t r9 = %16.16llx"
        //                        "\n\tr10 = %16.16llx"
        //                        "\n\tr11 = %16.16llx"
        //                        "\n\tr12 = %16.16llx"
        //                        "\n\tr13 = %16.16llx"
        //                        "\n\tr14 = %16.16llx"
        //                        "\n\tr15 = %16.16llx"
        //                        "\n\trip = %16.16llx"
        //                        "\n\tflg = %16.16llx"
        //                        "\n\t cs = %16.16llx"
        //                        "\n\t fs = %16.16llx"
        //                        "\n\t gs = %16.16llx",
        //                        m_thread->ThreadID(),
        //                        x86_THREAD_STATE64,
        //                        x86_THREAD_STATE64_COUNT,
        //                        m_state.GetError(e_regSetGPR, Read),
        //                        m_state.context.gpr.__rax,
        //                        m_state.context.gpr.__rbx,
        //                        m_state.context.gpr.__rcx,
        //                        m_state.context.gpr.__rdx,
        //                        m_state.context.gpr.__rdi,
        //                        m_state.context.gpr.__rsi,
        //                        m_state.context.gpr.__rbp,
        //                        m_state.context.gpr.__rsp,
        //                        m_state.context.gpr.__r8,
        //                        m_state.context.gpr.__r9,
        //                        m_state.context.gpr.__r10,
        //                        m_state.context.gpr.__r11,
        //                        m_state.context.gpr.__r12,
        //                        m_state.context.gpr.__r13,
        //                        m_state.context.gpr.__r14,
        //                        m_state.context.gpr.__r15,
        //                        m_state.context.gpr.__rip,
        //                        m_state.context.gpr.__rflags,
        //                        m_state.context.gpr.__cs,
        //                        m_state.context.gpr.__fs,
        //                        m_state.context.gpr.__gs);
#endif
    }
    return m_state.GetError(e_regSetGPR, Read);
}

// Uncomment the value below to verify the values in the debugger.
//#define DEBUG_FPU_REGS 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED

kern_return_t
DNBArchImplX86_64::GetFPUState(bool force)
{
    if (force || m_state.GetError(e_regSetFPU, Read))
    {
        if (DEBUG_FPU_REGS) {
            if (CPUHasAVX() || FORCE_AVX_REGS)
            {
                m_state.context.fpu.avx.__fpu_reserved[0] = -1;
                m_state.context.fpu.avx.__fpu_reserved[1] = -1;
                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
                m_state.context.fpu.avx.__fpu_ftw = 1;
                m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
                m_state.context.fpu.avx.__fpu_fop = 2;
                m_state.context.fpu.avx.__fpu_ip = 3;
                m_state.context.fpu.avx.__fpu_cs = 4;
                m_state.context.fpu.avx.__fpu_rsrv2 = 5;
                m_state.context.fpu.avx.__fpu_dp = 6;
                m_state.context.fpu.avx.__fpu_ds = 7;
                m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
                m_state.context.fpu.avx.__fpu_mxcsr = 8;
                m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
                int i;
                for (i=0; i<16; ++i)
                {
                    if (i<10)
                    {
                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
                    }
                    else
                    {
                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
                    }

                    m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
                    m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
                    m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
                    m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
                    m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
                    m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
                    m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
                    m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
                    m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8';
                    m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9';
                    m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A';
                    m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B';
                    m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C';
                    m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D';
                    m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E';
                    m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F';

                    m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
                    m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
                    m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
                    m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
                    m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
                    m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
                    m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
                    m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
                    m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8';
                    m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9';
                    m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A';
                    m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B';
                    m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C';
                    m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D';
                    m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E';
                    m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F';
                }
                for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
                    m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
                m_state.context.fpu.avx.__fpu_reserved1 = -1;
                for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
                    m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
                m_state.SetError(e_regSetFPU, Read, 0);
            }
            else
            {
                m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
                m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
                m_state.context.fpu.no_avx.__fpu_ftw = 1;
                m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
                m_state.context.fpu.no_avx.__fpu_fop = 2;
                m_state.context.fpu.no_avx.__fpu_ip = 3;
                m_state.context.fpu.no_avx.__fpu_cs = 4;
                m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
                m_state.context.fpu.no_avx.__fpu_dp = 6;
                m_state.context.fpu.no_avx.__fpu_ds = 7;
                m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
                m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
                m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
                int i;
                for (i=0; i<16; ++i)
                {
                    if (i<10)
                    {
                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
                    }
                    else
                    {
                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
                    }

                    m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
                    m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
                    m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
                    m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
                    m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
                    m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
                    m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
                    m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
                    m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
                    m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
                    m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
                    m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
                    m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
                    m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
                    m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
                    m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
                }
                for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
                    m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
                m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
                m_state.SetError(e_regSetFPU, Read, 0);
            }
        }
        else
        {
            if (CPUHasAVX() || FORCE_AVX_REGS)
            {
                mach_msg_type_number_t count = e_regSetWordSizeAVX;
                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x",
                                  m_thread->ThreadID(), __x86_64_AVX_STATE, (uint32_t)count,
                                  e_regSetWordSizeAVX, m_state.GetError(e_regSetFPU, Read));
            }
            else
            {
                mach_msg_type_number_t count = e_regSetWordSizeFPU;
                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in)) => 0x%8.8x",
                                  m_thread->ThreadID(), __x86_64_FLOAT_STATE, (uint32_t)count,
                                  e_regSetWordSizeFPU, m_state.GetError(e_regSetFPU, Read));
            }
        }
    }
    return m_state.GetError(e_regSetFPU, Read);
}

kern_return_t
DNBArchImplX86_64::GetEXCState(bool force)
{
    if (force || m_state.GetError(e_regSetEXC, Read))
    {
        mach_msg_type_number_t count = e_regSetWordSizeEXC;
        m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
    }
    return m_state.GetError(e_regSetEXC, Read);
}

kern_return_t
DNBArchImplX86_64::SetGPRState()
{
    kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
    DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());

    m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
    DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
                      "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
                      "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
                      "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
                      "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
                      "\n\trip = %16.16llx"
                      "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
                      m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
                      m_state.GetError(e_regSetGPR, Write),
                      m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
                      m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
                      m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
                      m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
                      m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
                      m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
                      m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
    return m_state.GetError(e_regSetGPR, Write);
}

kern_return_t
DNBArchImplX86_64::SetFPUState()
{
    if (DEBUG_FPU_REGS)
    {
        m_state.SetError(e_regSetFPU, Write, 0);
        return m_state.GetError(e_regSetFPU, Write);
    }
    else
    {
        if (CPUHasAVX() || FORCE_AVX_REGS)
        {
            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
            return m_state.GetError(e_regSetFPU, Write);
        }
        else
        {
            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
            return m_state.GetError(e_regSetFPU, Write);
        }
    }
}

kern_return_t
DNBArchImplX86_64::SetEXCState()
{
    m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
    return m_state.GetError(e_regSetEXC, Write);
}

kern_return_t
DNBArchImplX86_64::GetDBGState(bool force)
{
    if (force || m_state.GetError(e_regSetDBG, Read))
    {
        mach_msg_type_number_t count = e_regSetWordSizeDBG;
        m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
    }
    return m_state.GetError(e_regSetDBG, Read);
}

kern_return_t
DNBArchImplX86_64::SetDBGState()
{
    m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
    return m_state.GetError(e_regSetDBG, Write);
}

void
DNBArchImplX86_64::ThreadWillResume()
{
    // Do we need to step this thread? If so, let the mach thread tell us so.
    if (m_thread->IsStepping())
    {
        // This is the primary thread, let the arch do anything it needs
        EnableHardwareSingleStep(true);
    }

    // Reset the debug status register, if necessary, before we resume.
    kern_return_t kret = GetDBGState(false);
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
    if (kret != KERN_SUCCESS)
        return;

    DBG &debug_state = m_state.context.dbg;
    bool need_reset = false;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i)
        if (IsWatchpointHit(debug_state, i))
            need_reset = true;

    if (need_reset)
    {
        ClearWatchpointHits(debug_state);
        kret = SetDBGState();
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
    }
}

bool
DNBArchImplX86_64::ThreadDidStop()
{
    bool success = true;

    m_state.InvalidateAllRegisterStates();

    // Are we stepping a single instruction?
    if (GetGPRState(true) == KERN_SUCCESS)
    {
        // We are single stepping; was this the primary thread?
        if (m_thread->IsStepping())
        {
            // This was the primary thread, so we need to clear the
            // trace bit.
            success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
        }
        else
        {
            // The MachThread will automatically restore the suspend count
            // in ThreadDidStop(), so we don't need to do anything here if
            // we weren't the primary thread the last time.
        }
    }
    return success;
}

bool
DNBArchImplX86_64::NotifyException(MachException::Data& exc)
{
    switch (exc.exc_type)
    {
        case EXC_BAD_ACCESS:
            break;
        case EXC_BAD_INSTRUCTION:
            break;
        case EXC_ARITHMETIC:
            break;
        case EXC_EMULATION:
            break;
        case EXC_SOFTWARE:
            break;
        case EXC_BREAKPOINT:
            if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
            {
                // exc_code = EXC_I386_BPT
                //
                nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
                if (pc != INVALID_NUB_ADDRESS && pc > 0)
                {
                    pc -= 1;
                    // Check for a breakpoint at one byte prior to the current PC value
                    // since the PC will be just past the trap.

                    nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc);
                    if (NUB_BREAK_ID_IS_VALID(breakID))
                    {
                        // Back up the PC for i386 since the trap was taken and the PC
                        // is at the address following the single byte trap instruction.
                        if (m_state.context.gpr.__rip > 0)
                        {
                            m_state.context.gpr.__rip = pc;
                            // Write the new PC back out
                            SetGPRState ();
                        }
                    }
                    return true;
                }
            }
            else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
            {
                // exc_code = EXC_I386_SGL
                //
                // Check whether this corresponds to a watchpoint hit event.
                // If yes, set the exc_sub_code to the data break address.
                nub_addr_t addr = 0;
                uint32_t hw_index = GetHardwareWatchpointHit(addr);
                if (hw_index != INVALID_NUB_HW_INDEX)
                {
                    exc.exc_data[1] = addr;
                    // Piggyback the hw_index in the exc.data.
                    exc.exc_data.push_back(hw_index);
                }

                return true;
            }
            break;
        case EXC_SYSCALL:
            break;
        case EXC_MACH_SYSCALL:
            break;
        case EXC_RPC_ALERT:
            break;
    }
    return false;
}

uint32_t
DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
{
    // Available debug address registers: dr0, dr1, dr2, dr3.
    return 4;
}

static uint32_t
size_and_rw_bits(nub_size_t size, bool read, bool write)
{
    uint32_t rw;
    if (read) {
        rw = 0x3; // READ or READ/WRITE
    } else if (write) {
        rw = 0x1; // WRITE
    } else {
        assert(0 && "read and write cannot both be false");
    }

    switch (size) {
    case 1:
        return rw;
    case 2:
        return (0x1 << 2) | rw;
    case 4:
        return (0x3 << 2) | rw;
    case 8:
        return (0x2 << 2) | rw;
    default:
        assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
    }
}
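
// For example, the encodings produced above work out to:
//   size_and_rw_bits(4, true,  true)  == 0xf  (len = 0b11 -> 4 bytes, rw = 0b11 -> read/write)
//   size_and_rw_bits(8, false, true)  == 0x9  (len = 0b10 -> 8 bytes, rw = 0b01 -> write)
//   size_and_rw_bits(1, true,  false) == 0x3  (len = 0b00 -> 1 byte,  rw = 0b11 -> read/write)
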
void
DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
{
    // Set both dr7 (debug control register) and the corresponding drN (debug address register).

    // dr7{7-0} encodes the local/global enable bits:
    //  global enable --. .-- local enable
    //                  | |
    //                  v v
    //      dr0 -> bits{1-0}
    //      dr1 -> bits{3-2}
    //      dr2 -> bits{5-4}
    //      dr3 -> bits{7-6}
    //
    // dr7{31-16} encodes the rw/len bits:
    //  b_x+3, b_x+2, b_x+1, b_x
    //      where bits{x+1, x} => rw
    //            0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
    //      and bits{x+3, x+2} => len
    //            0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
    //
    //      dr0 -> bits{19-16}
    //      dr1 -> bits{23-20}
    //      dr2 -> bits{27-24}
    //      dr3 -> bits{31-28}
    debug_state.__dr7 |= (1 << (2*hw_index) |
                          size_and_rw_bits(size, read, write) << (16+4*hw_index));
    switch (hw_index) {
    case 0:
        debug_state.__dr0 = addr; break;
    case 1:
        debug_state.__dr1 = addr; break;
    case 2:
        debug_state.__dr2 = addr; break;
    case 3:
        debug_state.__dr3 = addr; break;
    default:
        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
    }
    return;
}
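
// Worked example: SetWatchpoint(debug_state, 1, addr, 8, false, true) sets
//   dr7 |= (1 << 2) | (size_and_rw_bits(8, false, true) << 20)
//        = 0x00000004 | (0x9 << 20)
//        = 0x00900004   (local enable for slot 1, 8-byte write-only)
// and stores the watch address in dr1.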

void
DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
{
    debug_state.__dr7 &= ~(3 << (2*hw_index));
    switch (hw_index) {
    case 0:
        debug_state.__dr0 = 0; break;
    case 1:
        debug_state.__dr1 = 0; break;
    case 2:
        debug_state.__dr2 = 0; break;
    case 3:
        debug_state.__dr3 = 0; break;
    default:
        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
    }
    return;
}

bool
DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
{
    // Check dr7 (debug control register) for local/global enable bits:
    //  global enable --. .-- local enable
    //                  | |
    //                  v v
    //      dr0 -> bits{1-0}
    //      dr1 -> bits{3-2}
    //      dr2 -> bits{5-4}
    //      dr3 -> bits{7-6}
    return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
}

// Resets local copy of debug status register to wait for the next debug exception.
void
DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state)
{
    // See also IsWatchpointHit().
    debug_state.__dr6 = 0;
    return;
}

bool
DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
{
    // Check dr6 (debug status register) to see whether a watchpoint was hit:
    //          is watchpoint hit?
    //                  |
    //                  v
    //      dr0 -> bits{0}
    //      dr1 -> bits{1}
    //      dr2 -> bits{2}
    //      dr3 -> bits{3}
    return (debug_state.__dr6 & (1 << hw_index));
}

nub_addr_t
DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
{
    switch (hw_index) {
    case 0:
        return debug_state.__dr0;
    case 1:
        return debug_state.__dr1;
    case 2:
        return debug_state.__dr2;
    case 3:
        return debug_state.__dr3;
    default:
        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
    }
}

uint32_t
DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write)
{
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = 0x%llx, size = %zu, read = %u, write = %u)", (uint64_t)addr, size, read, write);

    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

    // Can only watch 1, 2, 4, or 8 bytes.
    if (!(size == 1 || size == 2 || size == 4 || size == 8))
        return INVALID_NUB_HW_INDEX;

    // We must watch for either read or write
    if (read == false && write == false)
        return INVALID_NUB_HW_INDEX;

    // Read the debug state
    kern_return_t kret = GetDBGState(false);

    if (kret == KERN_SUCCESS)
    {
        // Check to make sure we have the needed hardware support
        uint32_t i = 0;

        DBG &debug_state = m_state.context.dbg;
        DBG dsCheckPoint = m_state.context.dbg;
        for (i = 0; i < num_hw_watchpoints; ++i)
        {
            if (IsWatchpointVacant(debug_state, i))
                break;
        }

        // See if we found an available hw breakpoint slot above
        if (i < num_hw_watchpoints)
        {
            // Modify our local copy of the debug state, first.
            SetWatchpoint(debug_state, i, addr, size, read, write);
            // Now set the watch point in the inferior.
            kret = SetDBGState();
            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);

            if (kret == KERN_SUCCESS)
                return i;
            else // Recovery block.
                m_state.context.dbg = dsCheckPoint;
        }
        else
        {
            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
        }
    }
    return INVALID_NUB_HW_INDEX;
}
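
// Hedged usage sketch (kept out of the build, not part of debugserver): how a
// caller might drive the watchpoint interface above. The "arch" reference and
// "addr" value are hypothetical stand-ins for whatever owns this
// DNBArchImplX86_64 instance.
#if 0
static void
WatchEightByteStore (DNBArchImplX86_64 &arch, nub_addr_t addr)
{
    // Ask for an 8-byte write-only watchpoint; a slot index comes back on success.
    uint32_t hw_index = arch.EnableHardwareWatchpoint (addr, 8, false, true);
    if (hw_index == INVALID_NUB_HW_INDEX)
        return; // all four debug registers are busy, or the request was invalid

    // ... later, when the watch is no longer needed ...
    arch.DisableHardwareWatchpoint (hw_index);
}
#endif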

bool
DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index)
{
    kern_return_t kret = GetDBGState(false);

    const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
    if (kret == KERN_SUCCESS)
    {
        DBG &debug_state = m_state.context.dbg;
        DBG dsCheckPoint = m_state.context.dbg;
        if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
        {
            // Modify our local copy of the debug state, first.
            ClearWatchpoint(debug_state, hw_index);
            // Now disable the watch point in the inferior.
            kret = SetDBGState();
            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
                             hw_index);

            if (kret == KERN_SUCCESS)
                return true;
            else // Recovery block.
                m_state.context.dbg = dsCheckPoint;
        }
    }
    return false;
}

DNBArchImplX86_64::DBG DNBArchImplX86_64::Global_Debug_State = {0,0,0,0,0,0,0,0};
bool DNBArchImplX86_64::Valid_Global_Debug_State = false;

// Use this callback from MachThread, which in turn was called from MachThreadList, to update
// the global view of the hardware watchpoint state, so that when a new thread comes along, it
// inherits the existing hardware watchpoint state.
void
DNBArchImplX86_64::HardwareWatchpointStateChanged ()
{
    Global_Debug_State = m_state.context.dbg;
    Valid_Global_Debug_State = true;
}

// Iterate through the debug status register; return the index of the first hit.
uint32_t
DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr)
{
    // Read the debug state
    kern_return_t kret = GetDBGState(true);
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
    if (kret == KERN_SUCCESS)
    {
        DBG &debug_state = m_state.context.dbg;
        uint32_t i, num = NumSupportedHardwareWatchpoints();
        for (i = 0; i < num; ++i)
        {
            if (IsWatchpointHit(debug_state, i))
            {
                addr = GetWatchAddress(debug_state, i);
                DNBLogThreadedIf(LOG_WATCHPOINTS,
                                 "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).",
                                 i,
                                 (uint64_t)addr);
                return i;
            }
        }
    }
    return INVALID_NUB_HW_INDEX;
}

// Set the single step bit in the processor status register.
kern_return_t
DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
{
    if (GetGPRState(false) == KERN_SUCCESS)
    {
        const uint32_t trace_bit = 0x100u;
        if (enable)
            m_state.context.gpr.__rflags |= trace_bit;
        else
            m_state.context.gpr.__rflags &= ~trace_bit;
        return SetGPRState();
    }
    return m_state.GetError(e_regSetGPR, Read);
}
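
// For reference: 0x100 is the TF (trap flag) bit, bit 8 of RFLAGS. With TF set,
// the CPU raises a single-step debug exception after executing the next
// instruction, which is what makes instruction stepping work. For example,
// enabling the step on a thread whose RFLAGS is 0x202 leaves it at 0x302, and
// clearing it restores 0x202.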


//----------------------------------------------------------------------
// Register information definitions
//----------------------------------------------------------------------

enum
{
    gpr_rax = 0,
    gpr_rbx,
    gpr_rcx,
    gpr_rdx,
    gpr_rdi,
    gpr_rsi,
    gpr_rbp,
    gpr_rsp,
    gpr_r8,
    gpr_r9,
    gpr_r10,
    gpr_r11,
    gpr_r12,
    gpr_r13,
    gpr_r14,
    gpr_r15,
    gpr_rip,
    gpr_rflags,
    gpr_cs,
    gpr_fs,
    gpr_gs,
    k_num_gpr_regs
};

enum {
    fpu_fcw,
    fpu_fsw,
    fpu_ftw,
    fpu_fop,
    fpu_ip,
    fpu_cs,
    fpu_dp,
    fpu_ds,
    fpu_mxcsr,
    fpu_mxcsrmask,
    fpu_stmm0,
    fpu_stmm1,
    fpu_stmm2,
    fpu_stmm3,
    fpu_stmm4,
    fpu_stmm5,
    fpu_stmm6,
    fpu_stmm7,
    fpu_xmm0,
    fpu_xmm1,
    fpu_xmm2,
    fpu_xmm3,
    fpu_xmm4,
    fpu_xmm5,
    fpu_xmm6,
    fpu_xmm7,
    fpu_xmm8,
    fpu_xmm9,
    fpu_xmm10,
    fpu_xmm11,
    fpu_xmm12,
    fpu_xmm13,
    fpu_xmm14,
    fpu_xmm15,
    fpu_ymm0,
    fpu_ymm1,
    fpu_ymm2,
    fpu_ymm3,
    fpu_ymm4,
    fpu_ymm5,
    fpu_ymm6,
    fpu_ymm7,
    fpu_ymm8,
    fpu_ymm9,
    fpu_ymm10,
    fpu_ymm11,
    fpu_ymm12,
    fpu_ymm13,
    fpu_ymm14,
    fpu_ymm15,
    k_num_fpu_regs,

    // Aliases
    fpu_fctrl = fpu_fcw,
    fpu_fstat = fpu_fsw,
    fpu_ftag  = fpu_ftw,
    fpu_fiseg = fpu_cs,
    fpu_fioff = fpu_ip,
    fpu_foseg = fpu_ds,
    fpu_fooff = fpu_dp
};

enum {
    exc_trapno,
    exc_err,
    exc_faultvaddr,
    k_num_exc_regs,
};


enum gcc_dwarf_regnums
{
    gcc_dwarf_rax = 0,
    gcc_dwarf_rdx = 1,
    gcc_dwarf_rcx = 2,
    gcc_dwarf_rbx = 3,
    gcc_dwarf_rsi = 4,
    gcc_dwarf_rdi = 5,
    gcc_dwarf_rbp = 6,
    gcc_dwarf_rsp = 7,
    gcc_dwarf_r8,
    gcc_dwarf_r9,
    gcc_dwarf_r10,
    gcc_dwarf_r11,
    gcc_dwarf_r12,
    gcc_dwarf_r13,
    gcc_dwarf_r14,
    gcc_dwarf_r15,
    gcc_dwarf_rip,
    gcc_dwarf_xmm0,
    gcc_dwarf_xmm1,
    gcc_dwarf_xmm2,
    gcc_dwarf_xmm3,
    gcc_dwarf_xmm4,
    gcc_dwarf_xmm5,
    gcc_dwarf_xmm6,
    gcc_dwarf_xmm7,
    gcc_dwarf_xmm8,
    gcc_dwarf_xmm9,
    gcc_dwarf_xmm10,
    gcc_dwarf_xmm11,
    gcc_dwarf_xmm12,
    gcc_dwarf_xmm13,
    gcc_dwarf_xmm14,
    gcc_dwarf_xmm15,
    gcc_dwarf_stmm0,
    gcc_dwarf_stmm1,
    gcc_dwarf_stmm2,
    gcc_dwarf_stmm3,
    gcc_dwarf_stmm4,
    gcc_dwarf_stmm5,
    gcc_dwarf_stmm6,
    gcc_dwarf_stmm7,
    gcc_dwarf_ymm0 = gcc_dwarf_xmm0,
    gcc_dwarf_ymm1 = gcc_dwarf_xmm1,
    gcc_dwarf_ymm2 = gcc_dwarf_xmm2,
    gcc_dwarf_ymm3 = gcc_dwarf_xmm3,
    gcc_dwarf_ymm4 = gcc_dwarf_xmm4,
    gcc_dwarf_ymm5 = gcc_dwarf_xmm5,
    gcc_dwarf_ymm6 = gcc_dwarf_xmm6,
    gcc_dwarf_ymm7 = gcc_dwarf_xmm7,
    gcc_dwarf_ymm8 = gcc_dwarf_xmm8,
    gcc_dwarf_ymm9 = gcc_dwarf_xmm9,
    gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
    gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
    gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
    gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
    gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
    gcc_dwarf_ymm15 = gcc_dwarf_xmm15
};

enum gdb_regnums
{
    gdb_rax     =   0,
    gdb_rbx     =   1,
    gdb_rcx     =   2,
    gdb_rdx     =   3,
    gdb_rsi     =   4,
    gdb_rdi     =   5,
    gdb_rbp     =   6,
    gdb_rsp     =   7,
    gdb_r8      =   8,
    gdb_r9      =   9,
    gdb_r10     =  10,
    gdb_r11     =  11,
    gdb_r12     =  12,
    gdb_r13     =  13,
    gdb_r14     =  14,
    gdb_r15     =  15,
    gdb_rip     =  16,
    gdb_rflags  =  17,
    gdb_cs      =  18,
    gdb_ss      =  19,
    gdb_ds      =  20,
    gdb_es      =  21,
    gdb_fs      =  22,
    gdb_gs      =  23,
    gdb_stmm0   =  24,
    gdb_stmm1   =  25,
    gdb_stmm2   =  26,
    gdb_stmm3   =  27,
    gdb_stmm4   =  28,
    gdb_stmm5   =  29,
    gdb_stmm6   =  30,
    gdb_stmm7   =  31,
    gdb_fctrl   =  32,  gdb_fcw = gdb_fctrl,
    gdb_fstat   =  33,  gdb_fsw = gdb_fstat,
    gdb_ftag    =  34,  gdb_ftw = gdb_ftag,
    gdb_fiseg   =  35,  gdb_fpu_cs  = gdb_fiseg,
    gdb_fioff   =  36,  gdb_ip  = gdb_fioff,
    gdb_foseg   =  37,  gdb_fpu_ds  = gdb_foseg,
    gdb_fooff   =  38,  gdb_dp  = gdb_fooff,
    gdb_fop     =  39,
    gdb_xmm0    =  40,
    gdb_xmm1    =  41,
    gdb_xmm2    =  42,
    gdb_xmm3    =  43,
    gdb_xmm4    =  44,
    gdb_xmm5    =  45,
    gdb_xmm6    =  46,
    gdb_xmm7    =  47,
    gdb_xmm8    =  48,
    gdb_xmm9    =  49,
    gdb_xmm10   =  50,
    gdb_xmm11   =  51,
    gdb_xmm12   =  52,
    gdb_xmm13   =  53,
    gdb_xmm14   =  54,
    gdb_xmm15   =  55,
    gdb_mxcsr   =  56,
    gdb_ymm0    =  gdb_xmm0,
    gdb_ymm1    =  gdb_xmm1,
    gdb_ymm2    =  gdb_xmm2,
    gdb_ymm3    =  gdb_xmm3,
    gdb_ymm4    =  gdb_xmm4,
    gdb_ymm5    =  gdb_xmm5,
    gdb_ymm6    =  gdb_xmm6,
    gdb_ymm7    =  gdb_xmm7,
    gdb_ymm8    =  gdb_xmm8,
    gdb_ymm9    =  gdb_xmm9,
    gdb_ymm10   =  gdb_xmm10,
    gdb_ymm11   =  gdb_xmm11,
    gdb_ymm12   =  gdb_xmm12,
    gdb_ymm13   =  gdb_xmm13,
    gdb_ymm14   =  gdb_xmm14,
    gdb_ymm15   =  gdb_xmm15
};

#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg)     + offsetof (DNBArchImplX86_64::Context, exc))

// This does not accurately identify the location of ymm0...7 in
// Context.fpu.avx.  That is because there is a bunch of padding
// in Context.fpu.avx that we don't need.  Offset macros lay out
// the register state that Debugserver transmits to the debugger
// -- not to interpret the thread_get_state info.
#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))

#define GPR_SIZE(reg)       (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
#define FPU_SIZE_YMM(reg)   (32)
#define EXC_SIZE(reg)       (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
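
// Illustrative check (kept out of the build): by expansion of AVX_OFFSET_YMM
// above, consecutive ymm slots in the transmitted register block are 32 bytes
// apart, and ymm0 starts right after xmm7. These follow purely from the macro
// arithmetic, not from the kernel's AVX structure layout.
#if 0
static_assert (AVX_OFFSET_YMM(1) == AVX_OFFSET_YMM(0) + 32,
               "ymm register slots are packed 32 bytes apart");
static_assert (AVX_OFFSET_YMM(0) == AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7),
               "ymm0 immediately follows xmm7 in the transmitted layout");
#endif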

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg }
#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg }
#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg }
#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg }

1203// General purpose registers for 64 bit
1204const DNBRegisterInfo
1205DNBArchImplX86_64::g_gpr_registers[] =
1206{
1207    DEFINE_GPR      (rax),
1208    DEFINE_GPR      (rbx),
1209    DEFINE_GPR_ALT  (rcx , "arg4", GENERIC_REGNUM_ARG4),
1210    DEFINE_GPR_ALT  (rdx , "arg3", GENERIC_REGNUM_ARG3),
1211    DEFINE_GPR_ALT  (rdi , "arg1", GENERIC_REGNUM_ARG1),
1212    DEFINE_GPR_ALT  (rsi , "arg2", GENERIC_REGNUM_ARG2),
1213    DEFINE_GPR_ALT  (rbp , "fp"  , GENERIC_REGNUM_FP),
1214    DEFINE_GPR_ALT  (rsp , "sp"  , GENERIC_REGNUM_SP),
1215    DEFINE_GPR_ALT  (r8  , "arg5", GENERIC_REGNUM_ARG5),
1216    DEFINE_GPR_ALT  (r9  , "arg6", GENERIC_REGNUM_ARG6),
1217    DEFINE_GPR      (r10),
1218    DEFINE_GPR      (r11),
1219    DEFINE_GPR      (r12),
1220    DEFINE_GPR      (r13),
1221    DEFINE_GPR      (r14),
1222    DEFINE_GPR      (r15),
1223    DEFINE_GPR_ALT  (rip , "pc", GENERIC_REGNUM_PC),
1224    DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
1225    DEFINE_GPR_ALT2 (cs,        NULL),
1226    DEFINE_GPR_ALT2 (fs,        NULL),
1227    DEFINE_GPR_ALT2 (gs,        NULL),
1228};
1229
1230// Floating point registers 64 bit
1231const DNBRegisterInfo
1232DNBArchImplX86_64::g_fpu_registers_no_avx[] =
1233{
1234    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1U, -1U, -1U, -1U },
1235    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1U, -1U, -1U, -1U },
1236    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1U, -1U, -1U, -1U },
1237    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1U, -1U, -1U, -1U },
1238    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1U, -1U, -1U, -1U },
1239    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1U, -1U, -1U, -1U },
1240    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U },

    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0 },
    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1 },
    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2 },
    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3 },
    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4 },
    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5 },
    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6 },
    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7 },

    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 },
    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 },
    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 },
    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 },
    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 },
    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 },
    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 },
    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 },
    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8  },
    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9  },
    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10 },
    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11 },
    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12 },
    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13 },
    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14 },
    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15 },
};

const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_avx[] =
{
    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1U, -1U, -1U, -1U },
    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U },

    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0 },
    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1 },
    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2 },
    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3 },
    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4 },
    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5 },
    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6 },
    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7 },

    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 },
    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 },
    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 },
    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 },
    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 },
    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 },
    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 },
    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 },
    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8  },
    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9  },
    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10 },
    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11 },
    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12 },
    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13 },
    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14 },
    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15 },

    { e_regSetFPU, fpu_ymm0 , "ymm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)   , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1U, gdb_ymm0 },
    { e_regSetFPU, fpu_ymm1 , "ymm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)   , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1U, gdb_ymm1 },
    { e_regSetFPU, fpu_ymm2 , "ymm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)   , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1U, gdb_ymm2 },
    { e_regSetFPU, fpu_ymm3 , "ymm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)   , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1U, gdb_ymm3 },
    { e_regSetFPU, fpu_ymm4 , "ymm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)   , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1U, gdb_ymm4 },
    { e_regSetFPU, fpu_ymm5 , "ymm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)   , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1U, gdb_ymm5 },
    { e_regSetFPU, fpu_ymm6 , "ymm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)   , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1U, gdb_ymm6 },
    { e_regSetFPU, fpu_ymm7 , "ymm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)   , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1U, gdb_ymm7 },
    { e_regSetFPU, fpu_ymm8 , "ymm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)   , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1U, gdb_ymm8  },
    { e_regSetFPU, fpu_ymm9 , "ymm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)   , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1U, gdb_ymm9  },
    { e_regSetFPU, fpu_ymm10, "ymm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10)  , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1U, gdb_ymm10 },
    { e_regSetFPU, fpu_ymm11, "ymm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11)  , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1U, gdb_ymm11 },
    { e_regSetFPU, fpu_ymm12, "ymm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12)  , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1U, gdb_ymm12 },
    { e_regSetFPU, fpu_ymm13, "ymm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13)  , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1U, gdb_ymm13 },
    { e_regSetFPU, fpu_ymm14, "ymm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14)  , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1U, gdb_ymm14 },
    { e_regSetFPU, fpu_ymm15, "ymm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15)  , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1U, gdb_ymm15 }
};

// Exception registers

const DNBRegisterInfo
DNBArchImplX86_64::g_exc_registers[] =
{
    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)       , -1U, -1U, -1U, -1U },
    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)          , -1U, -1U, -1U, -1U },
    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr)   , -1U, -1U, -1U, -1U }
};

// Number of registers in each register set
const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;

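// The counts above use the usual sizeof(array)/sizeof(element) idiom, so they
// stay in sync with the tables automatically. Illustrative sketch of the
// arithmetic (not code taken from this file):
//
//     sizeof(g_exc_registers)/sizeof(DNBRegisterInfo) == 3    // trapno, err, faultvaddr
//     k_num_all_registers_avx == k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers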
//----------------------------------------------------------------------
// Register set definitions. The first definition, at register set index
// zero, is for all registers, followed by the other register sets. The
// register information for the all-registers set need not be filled in.
//----------------------------------------------------------------------
const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_no_avx[] =
{
    { "x86_64 Registers",           NULL,               k_num_all_registers_no_avx },
    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
};

const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_avx[] =
{
    { "x86_64 Registers",           NULL,               k_num_all_registers_avx },
    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
    { "Floating Point Registers",   g_fpu_registers_avx, k_num_fpu_registers_avx },
    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
};

// Total number of register sets for this architecture
const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);


DNBArchProtocol *
DNBArchImplX86_64::Create (MachThread *thread)
{
    DNBArchImplX86_64 *obj = new DNBArchImplX86_64 (thread);

    // When a new thread comes along, it tries to inherit from the global debug state, if that state is valid.
    if (Valid_Global_Debug_State)
    {
        obj->m_state.context.dbg = Global_Debug_State;
        kern_return_t kret = obj->SetDBGState();
        DNBLogThreadedIf(LOG_WATCHPOINTS,
                         "DNBArchImplX86_64::Create() Inherit and SetDBGState() => 0x%8.8x.", kret);
    }
    return obj;
}

const uint8_t * const
DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
{
    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
    if (byte_size == 1)
        return g_breakpoint_opcode;
    return NULL;
}

const DNBRegisterSetInfo *
DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
{
    *num_reg_sets = k_num_register_sets;

    if (CPUHasAVX() || FORCE_AVX_REGS)
        return g_reg_sets_avx;
    else
        return g_reg_sets_no_avx;
}

void
DNBArchImplX86_64::Initialize()
{
    DNBArchPluginInfo arch_plugin_info =
    {
        CPU_TYPE_X86_64,
        DNBArchImplX86_64::Create,
        DNBArchImplX86_64::GetRegisterSetInfo,
        DNBArchImplX86_64::SoftwareBreakpointOpcode
    };

    // Register this arch plug-in with the main protocol class
    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
}

bool
DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
{
    if (set == REGISTER_SET_GENERIC)
    {
        switch (reg)
        {
            case GENERIC_REGNUM_PC:     // Program Counter
                set = e_regSetGPR;
                reg = gpr_rip;
                break;

            case GENERIC_REGNUM_SP:     // Stack Pointer
                set = e_regSetGPR;
                reg = gpr_rsp;
                break;

            case GENERIC_REGNUM_FP:     // Frame Pointer
                set = e_regSetGPR;
                reg = gpr_rbp;
                break;

            case GENERIC_REGNUM_FLAGS:  // Processor flags register
                set = e_regSetGPR;
                reg = gpr_rflags;
                break;

            case GENERIC_REGNUM_RA:     // Return Address
            default:
                return false;
        }
    }

    if (GetRegisterState(set, false) != KERN_SUCCESS)
        return false;

    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
    if (regInfo)
    {
        value->info = *regInfo;
        switch (set)
        {
            case e_regSetGPR:
                if (reg < k_num_gpr_registers)
                {
                    value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
                    return true;
                }
                break;

            case e_regSetFPU:
                if (CPUHasAVX() || FORCE_AVX_REGS)
                {
                    switch (reg)
                    {
                    case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
                    case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
                    case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
                    case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
                    case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
                    case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
                    case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
                    case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
                    case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
                    case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;

                    case fpu_stmm0:
                    case fpu_stmm1:
                    case fpu_stmm2:
                    case fpu_stmm3:
                    case fpu_stmm4:
                    case fpu_stmm5:
                    case fpu_stmm6:
                    case fpu_stmm7:
                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
                        return true;

                    case fpu_xmm0:
                    case fpu_xmm1:
                    case fpu_xmm2:
                    case fpu_xmm3:
                    case fpu_xmm4:
                    case fpu_xmm5:
                    case fpu_xmm6:
                    case fpu_xmm7:
                    case fpu_xmm8:
                    case fpu_xmm9:
                    case fpu_xmm10:
                    case fpu_xmm11:
                    case fpu_xmm12:
                    case fpu_xmm13:
                    case fpu_xmm14:
                    case fpu_xmm15:
                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
                        return true;

                    case fpu_ymm0:
                    case fpu_ymm1:
                    case fpu_ymm2:
                    case fpu_ymm3:
                    case fpu_ymm4:
                    case fpu_ymm5:
                    case fpu_ymm6:
                    case fpu_ymm7:
                    case fpu_ymm8:
                    case fpu_ymm9:
                    case fpu_ymm10:
                    case fpu_ymm11:
                    case fpu_ymm12:
                    case fpu_ymm13:
                    case fpu_ymm14:
                    case fpu_ymm15:
                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
                        memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
                        return true;
                    }
                }
                else
                {
                    switch (reg)
                    {
                        case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
                        case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
                        case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
                        case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
                        case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
                        case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
                        case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
                        case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
                        case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
                        case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;

                        case fpu_stmm0:
                        case fpu_stmm1:
                        case fpu_stmm2:
                        case fpu_stmm3:
                        case fpu_stmm4:
                        case fpu_stmm5:
                        case fpu_stmm6:
                        case fpu_stmm7:
                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
                            return true;

                        case fpu_xmm0:
                        case fpu_xmm1:
                        case fpu_xmm2:
                        case fpu_xmm3:
                        case fpu_xmm4:
                        case fpu_xmm5:
                        case fpu_xmm6:
                        case fpu_xmm7:
                        case fpu_xmm8:
                        case fpu_xmm9:
                        case fpu_xmm10:
                        case fpu_xmm11:
                        case fpu_xmm12:
                        case fpu_xmm13:
                        case fpu_xmm14:
                        case fpu_xmm15:
                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
                            return true;
                    }
                }
                break;

            case e_regSetEXC:
                switch (reg)
                {
                case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno; return true;
                case exc_err:       value->value.uint32 = m_state.context.exc.__err; return true;
                case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
                }
                break;
        }
    }
    return false;
}


bool
DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
{
    if (set == REGISTER_SET_GENERIC)
    {
        switch (reg)
        {
            case GENERIC_REGNUM_PC:     // Program Counter
                set = e_regSetGPR;
                reg = gpr_rip;
                break;

            case GENERIC_REGNUM_SP:     // Stack Pointer
                set = e_regSetGPR;
                reg = gpr_rsp;
                break;

            case GENERIC_REGNUM_FP:     // Frame Pointer
                set = e_regSetGPR;
                reg = gpr_rbp;
                break;

            case GENERIC_REGNUM_FLAGS:  // Processor flags register
                set = e_regSetGPR;
                reg = gpr_rflags;
                break;

            case GENERIC_REGNUM_RA:     // Return Address
            default:
                return false;
        }
    }

    if (GetRegisterState(set, false) != KERN_SUCCESS)
        return false;

    bool success = false;
    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
    if (regInfo)
    {
        switch (set)
        {
            case e_regSetGPR:
                if (reg < k_num_gpr_registers)
                {
                    ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
                    success = true;
                }
                break;

            case e_regSetFPU:
                if (CPUHasAVX() || FORCE_AVX_REGS)
                {
                    switch (reg)
                    {
                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
                    case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
                    case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
                    case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
                    case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
                    case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
                    case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
                    case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
                    case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;

                    case fpu_stmm0:
                    case fpu_stmm1:
                    case fpu_stmm2:
                    case fpu_stmm3:
                    case fpu_stmm4:
                    case fpu_stmm5:
                    case fpu_stmm6:
                    case fpu_stmm7:
                        memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
                        success = true;
                        break;

                    case fpu_xmm0:
                    case fpu_xmm1:
                    case fpu_xmm2:
                    case fpu_xmm3:
                    case fpu_xmm4:
                    case fpu_xmm5:
                    case fpu_xmm6:
                    case fpu_xmm7:
                    case fpu_xmm8:
                    case fpu_xmm9:
                    case fpu_xmm10:
                    case fpu_xmm11:
                    case fpu_xmm12:
                    case fpu_xmm13:
                    case fpu_xmm14:
                    case fpu_xmm15:
                        memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
                        success = true;
                        break;

                    case fpu_ymm0:
                    case fpu_ymm1:
                    case fpu_ymm2:
                    case fpu_ymm3:
                    case fpu_ymm4:
                    case fpu_ymm5:
                    case fpu_ymm6:
                    case fpu_ymm7:
                    case fpu_ymm8:
                    case fpu_ymm9:
                    case fpu_ymm10:
                    case fpu_ymm11:
                    case fpu_ymm12:
                    case fpu_ymm13:
                    case fpu_ymm14:
                    case fpu_ymm15:
                        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
                        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
                        success = true;
                        break;
                    }
                }
                else
                {
                    switch (reg)
                    {
                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
                    case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
                    case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
                    case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
                    case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
                    case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
                    case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
                    case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
                    case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;

                    case fpu_stmm0:
                    case fpu_stmm1:
                    case fpu_stmm2:
                    case fpu_stmm3:
                    case fpu_stmm4:
                    case fpu_stmm5:
                    case fpu_stmm6:
                    case fpu_stmm7:
                        memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
                        success = true;
                        break;

                    case fpu_xmm0:
                    case fpu_xmm1:
                    case fpu_xmm2:
                    case fpu_xmm3:
                    case fpu_xmm4:
                    case fpu_xmm5:
                    case fpu_xmm6:
                    case fpu_xmm7:
                    case fpu_xmm8:
                    case fpu_xmm9:
                    case fpu_xmm10:
                    case fpu_xmm11:
                    case fpu_xmm12:
                    case fpu_xmm13:
                    case fpu_xmm14:
                    case fpu_xmm15:
                        memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
                        success = true;
                        break;
                    }
                }
                break;

            case e_regSetEXC:
                switch (reg)
                {
                case exc_trapno:    m_state.context.exc.__trapno = value->value.uint32;     success = true; break;
                case exc_err:       m_state.context.exc.__err = value->value.uint32;        success = true; break;
                case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
                }
                break;
        }
    }

    if (success)
        return SetRegisterState(set) == KERN_SUCCESS;
    return false;
}
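// Hedged companion sketch for the write path: values are staged in
// m_state.context and only reach the thread if SetRegisterState() succeeds at
// the end of the function. 'arch' and 'resume_addr' are placeholders:
//
//     DNBRegisterValue new_pc;
//     new_pc.value.uint64 = resume_addr;
//     if (!arch->SetRegisterValue (REGISTER_SET_GENERIC, GENERIC_REGNUM_PC, &new_pc))
//     {
//         // The write was rejected or the kernel refused the new thread state.
//     }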


nub_size_t
DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
{
    nub_size_t size = sizeof (m_state.context);

    if (buf && buf_len)
    {
        if (size > buf_len)
            size = buf_len;

        bool force = false;
        kern_return_t kret;
        if ((kret = GetGPRState(force)) != KERN_SUCCESS)
        {
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to read: %u ", buf, buf_len, kret);
            size = 0;
        }
        else
        if ((kret = GetFPUState(force)) != KERN_SUCCESS)
        {
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: %s regs failed to read: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
            size = 0;
        }
        else
        if ((kret = GetEXCState(force)) != KERN_SUCCESS)
        {
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to read: %u", buf, buf_len, kret);
            size = 0;
        }
        else
        {
            // Success
            ::memcpy (buf, &m_state.context, size);
        }
    }
    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
    // Return the size of the register context even if NULL was passed in
    return size;
}
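// Since the full context size is reported even for a NULL buffer, callers can
// size a buffer in two steps. Hedged sketch; 'arch' is a placeholder for an
// existing instance:
//
//     nub_size_t needed = arch->GetRegisterContext (NULL, 0);   // query the size only
//     std::vector<uint8_t> ctx (needed);
//     arch->GetRegisterContext (&ctx[0], ctx.size());           // fill the buffer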

nub_size_t
DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
{
    nub_size_t size = sizeof (m_state.context);
    if (buf == NULL || buf_len == 0)
        size = 0;

    if (size)
    {
        if (size > buf_len)
            size = buf_len;

        ::memcpy (&m_state.context, buf, size);
        kern_return_t kret;
        if ((kret = SetGPRState()) != KERN_SUCCESS)
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to write: %u", buf, buf_len, kret);
        if ((kret = SetFPUState()) != KERN_SUCCESS)
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: %s regs failed to write: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
        if ((kret = SetEXCState()) != KERN_SUCCESS)
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to write: %u", buf, buf_len, kret);
    }
    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
    return size;
}


kern_return_t
DNBArchImplX86_64::GetRegisterState(int set, bool force)
{
    switch (set)
    {
        case e_regSetALL:    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
        case e_regSetGPR:    return GetGPRState(force);
        case e_regSetFPU:    return GetFPUState(force);
        case e_regSetEXC:    return GetEXCState(force);
        default: break;
    }
    return KERN_INVALID_ARGUMENT;
}
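// Note on the e_regSetALL case: the three kern_return_t values are merged with
// bitwise OR. Since KERN_SUCCESS is 0, the result is KERN_SUCCESS only when all
// three reads succeed; if any of them fails, the OR of the error codes signals
// failure but is not itself a specific Mach error code.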

kern_return_t
DNBArchImplX86_64::SetRegisterState(int set)
{
    // Make sure we have a valid context to set.
    if (RegisterSetStateIsValid(set))
    {
        switch (set)
        {
            case e_regSetALL:    return SetGPRState() | SetFPUState() | SetEXCState();
            case e_regSetGPR:    return SetGPRState();
            case e_regSetFPU:    return SetFPUState();
            case e_regSetEXC:    return SetEXCState();
            default: break;
        }
    }
    return KERN_INVALID_ARGUMENT;
}

bool
DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
{
    return m_state.RegsAreValid(set);
}



#endif    // #if defined (__i386__) || defined (__x86_64__)
