DNBArchImplX86_64.cpp revision 73f6b49b568f48755bb5058a3db679c3b9093682
1//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  Created by Greg Clayton on 6/25/07.
11//
12//===----------------------------------------------------------------------===//
13
14#if defined (__i386__) || defined (__x86_64__)
15
16#include <sys/cdefs.h>
17#include <sys/types.h>
18#include <sys/sysctl.h>
19
20#include "MacOSX/x86_64/DNBArchImplX86_64.h"
21#include "DNBLog.h"
22#include "MachThread.h"
23#include "MachProcess.h"
24#include <mach/mach.h>
25#include <stdlib.h>
26
27#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
28enum debugState {
29    debugStateUnknown,
30    debugStateOff,
31    debugStateOn
32};
33
34static debugState sFPUDebugState = debugStateUnknown;
35static debugState sAVXForceState = debugStateUnknown;
36
37static bool DebugFPURegs ()
38{
39    if (sFPUDebugState == debugStateUnknown)
40    {
41        if (getenv("DNB_DEBUG_FPU_REGS"))
42            sFPUDebugState = debugStateOn;
43        else
44            sFPUDebugState = debugStateOff;
45    }
46
47    return (sFPUDebugState == debugStateOn);
48}
49
50static bool ForceAVXRegs ()
51{
52    if (sAVXForceState == debugStateUnknown)
53    {
54        if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
55            sAVXForceState = debugStateOn;
56        else
57            sAVXForceState = debugStateOff;
58    }
59
60    return (sAVXForceState == debugStateOn);
61}
62
63#define DEBUG_FPU_REGS (DebugFPURegs())
64#define FORCE_AVX_REGS (ForceAVXRegs())
65#else
66#define DEBUG_FPU_REGS (0)
67#define FORCE_AVX_REGS (0)
68#endif
69
70
71extern "C" bool
72CPUHasAVX()
73{
74    enum AVXPresence
75    {
76        eAVXUnknown     = -1,
77        eAVXNotPresent  =  0,
78        eAVXPresent     =  1
79    };
80
81    static AVXPresence g_has_avx = eAVXUnknown;
82    if (g_has_avx == eAVXUnknown)
83    {
84        g_has_avx = eAVXNotPresent;
85
86        // Only xnu-2020 or later has AVX support; any version before this
87        // has a busted thread_get_state RPC that truncates the thread state
88        // buffer (<rdar://problem/10122874>). So we need to verify the kernel
89        // version number manually or disable AVX support.
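        // For example (illustrative string only), KERN_VERSION returns
        // something like:
        //   "Darwin Kernel Version 12.0.0: ...; root:xnu-2050.7.9~1/RELEASE_X86_64"
        // strstr() finds "xnu-" and strtoull() then parses 2050 from the
        // characters that follow, which satisfies the >= 2020 check below.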
90        int mib[2];
91        char buffer[1024];
92        size_t length = sizeof(buffer);
93        uint64_t xnu_version = 0;
94        mib[0] = CTL_KERN;
95        mib[1] = KERN_VERSION;
96        int err = ::sysctl(mib, 2, &buffer, &length, NULL, 0);
97        if (err == 0)
98        {
99            const char *xnu = strstr (buffer, "xnu-");
100            if (xnu)
101            {
102                const char *xnu_version_cstr = xnu + 4;
103                xnu_version = strtoull (xnu_version_cstr, NULL, 0);
104                if (xnu_version >= 2020 && xnu_version != ULLONG_MAX)
105                {
106                    if (::HasAVX())
107                    {
108                        g_has_avx = eAVXPresent;
109                    }
110                }
111            }
112        }
113        DNBLogThreadedIf (LOG_THREAD, "CPUHasAVX(): g_has_avx = %i (err = %i, errno = %i, xnu_version = %llu)", g_has_avx, err, errno, xnu_version);
114    }
115
116    return (g_has_avx == eAVXPresent);
117}
118
119uint64_t
120DNBArchImplX86_64::GetPC(uint64_t failValue)
121{
122    // Get program counter
123    if (GetGPRState(false) == KERN_SUCCESS)
124        return m_state.context.gpr.__rip;
125    return failValue;
126}
127
128kern_return_t
129DNBArchImplX86_64::SetPC(uint64_t value)
130{
131    // Set the program counter
132    kern_return_t err = GetGPRState(false);
133    if (err == KERN_SUCCESS)
134    {
135        m_state.context.gpr.__rip = value;
136        err = SetGPRState();
137    }
138    return err;
139}
140
141uint64_t
142DNBArchImplX86_64::GetSP(uint64_t failValue)
143{
144    // Get stack pointer
145    if (GetGPRState(false) == KERN_SUCCESS)
146        return m_state.context.gpr.__rsp;
147    return failValue;
148}
149
150// Uncomment the value below to verify the values in the debugger.
151//#define DEBUG_GPR_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
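// When DEBUG_GPR_VALUES is enabled, each GPR is filled with a two character
// ASCII tag instead of being read from the thread, e.g. ('a' << 8) + 'x' ==
// 0x6178, so each register is easy to recognize when verifying values in the
// debugger.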
152
153kern_return_t
154DNBArchImplX86_64::GetGPRState(bool force)
155{
156    if (force || m_state.GetError(e_regSetGPR, Read))
157    {
158#if DEBUG_GPR_VALUES
159        m_state.context.gpr.__rax = ('a' << 8) + 'x';
160        m_state.context.gpr.__rbx = ('b' << 8) + 'x';
161        m_state.context.gpr.__rcx = ('c' << 8) + 'x';
162        m_state.context.gpr.__rdx = ('d' << 8) + 'x';
163        m_state.context.gpr.__rdi = ('d' << 8) + 'i';
164        m_state.context.gpr.__rsi = ('s' << 8) + 'i';
165        m_state.context.gpr.__rbp = ('b' << 8) + 'p';
166        m_state.context.gpr.__rsp = ('s' << 8) + 'p';
167        m_state.context.gpr.__r8  = ('r' << 8) + '8';
168        m_state.context.gpr.__r9  = ('r' << 8) + '9';
169        m_state.context.gpr.__r10 = ('r' << 8) + 'a';
170        m_state.context.gpr.__r11 = ('r' << 8) + 'b';
171        m_state.context.gpr.__r12 = ('r' << 8) + 'c';
172        m_state.context.gpr.__r13 = ('r' << 8) + 'd';
173        m_state.context.gpr.__r14 = ('r' << 8) + 'e';
174        m_state.context.gpr.__r15 = ('r' << 8) + 'f';
175        m_state.context.gpr.__rip = ('i' << 8) + 'p';
176        m_state.context.gpr.__rflags = ('f' << 8) + 'l';
177        m_state.context.gpr.__cs = ('c' << 8) + 's';
178        m_state.context.gpr.__fs = ('f' << 8) + 's';
179        m_state.context.gpr.__gs = ('g' << 8) + 's';
180        m_state.SetError(e_regSetGPR, Read, 0);
181#else
182        mach_msg_type_number_t count = e_regSetWordSizeGPR;
183        m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
184        DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
185                          "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
186                          "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
187                          "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
188                          "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
189                          "\n\trip = %16.16llx"
190                          "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
191                          m_thread->ThreadID(), __x86_64_THREAD_STATE, (uint32_t)count,
192                          m_state.GetError(e_regSetGPR, Read),
193                          m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
194                          m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
195                          m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
196                          m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
197                          m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
198                          m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
199                          m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);
200
248#endif
249    }
250    return m_state.GetError(e_regSetGPR, Read);
251}
252
253// Uncomment the value below to verify the values in the debugger.
254//#define DEBUG_FPU_REGS 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
255
256kern_return_t
257DNBArchImplX86_64::GetFPUState(bool force)
258{
259    if (force || m_state.GetError(e_regSetFPU, Read))
260    {
261        if (DEBUG_FPU_REGS) {
262            if (CPUHasAVX() || FORCE_AVX_REGS)
263            {
264                m_state.context.fpu.avx.__fpu_reserved[0] = -1;
265                m_state.context.fpu.avx.__fpu_reserved[1] = -1;
266                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
267                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
268                m_state.context.fpu.avx.__fpu_ftw = 1;
269                m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
270                m_state.context.fpu.avx.__fpu_fop = 2;
271                m_state.context.fpu.avx.__fpu_ip = 3;
272                m_state.context.fpu.avx.__fpu_cs = 4;
273                m_state.context.fpu.avx.__fpu_rsrv2 = 5;
274                m_state.context.fpu.avx.__fpu_dp = 6;
275                m_state.context.fpu.avx.__fpu_ds = 7;
276                m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
277                m_state.context.fpu.avx.__fpu_mxcsr = 8;
278                m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
279                int i;
280                for (i=0; i<16; ++i)
281                {
282                    if (i<10)
283                    {
284                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
285                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
286                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
287                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
288                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
289                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
290                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
291                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
292                    }
293                    else
294                    {
295                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
296                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
297                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
298                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
299                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
300                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
301                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
302                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
303                    }
304
305                    m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
306                    m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
307                    m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
308                    m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
309                    m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
310                    m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
311                    m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
312                    m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
313                    m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8';
314                    m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9';
315                    m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A';
316                    m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B';
317                    m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C';
318                    m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D';
319                    m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E';
320                    m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F';
321
322                    m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
323                    m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
324                    m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
325                    m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
326                    m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
327                    m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
328                    m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
329                    m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
330                    m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8';
331                    m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9';
332                    m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A';
333                    m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B';
334                    m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C';
335                    m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D';
336                    m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E';
337                    m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F';
338                }
339                for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
340                    m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
341                m_state.context.fpu.avx.__fpu_reserved1 = -1;
342                for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
343                    m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
344                m_state.SetError(e_regSetFPU, Read, 0);
345            }
346            else
347            {
348                m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
349                m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
350                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
351                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
352                m_state.context.fpu.no_avx.__fpu_ftw = 1;
353                m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
354                m_state.context.fpu.no_avx.__fpu_fop = 2;
355                m_state.context.fpu.no_avx.__fpu_ip = 3;
356                m_state.context.fpu.no_avx.__fpu_cs = 4;
357                m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
358                m_state.context.fpu.no_avx.__fpu_dp = 6;
359                m_state.context.fpu.no_avx.__fpu_ds = 7;
360                m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
361                m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
362                m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
363                int i;
364                for (i=0; i<16; ++i)
365                {
366                    if (i<10)
367                    {
368                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
369                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
370                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
371                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
372                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
373                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
374                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
375                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
376                    }
377                    else
378                    {
379                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
380                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
381                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
382                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
383                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
384                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
385                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
386                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
387                    }
388
389                    m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
390                    m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
391                    m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
392                    m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
393                    m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
394                    m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
395                    m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
396                    m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
397                    m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
398                    m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
399                    m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
400                    m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
401                    m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
402                    m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
403                    m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
404                    m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
405                }
406                for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
407                    m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
408                m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
409                m_state.SetError(e_regSetFPU, Read, 0);
410            }
411        }
412        else
413        {
414            if (CPUHasAVX() || FORCE_AVX_REGS)
415            {
416                mach_msg_type_number_t count = e_regSetWordSizeAVX;
417                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
418                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x",
419                                  m_thread->ThreadID(), __x86_64_AVX_STATE, (uint32_t)count,
420                                  e_regSetWordSizeAVX, m_state.GetError(e_regSetFPU, Read));
421            }
422            else
423            {
424                mach_msg_type_number_t count = e_regSetWordSizeFPU;
425                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
426                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in)) => 0x%8.8x",
427                                  m_thread->ThreadID(), __x86_64_FLOAT_STATE, (uint32_t)count,
428                                  e_regSetWordSizeFPU, m_state.GetError(e_regSetFPU, Read));
429            }
430        }
431    }
432    return m_state.GetError(e_regSetFPU, Read);
433}
434
435kern_return_t
436DNBArchImplX86_64::GetEXCState(bool force)
437{
438    if (force || m_state.GetError(e_regSetEXC, Read))
439    {
440        mach_msg_type_number_t count = e_regSetWordSizeEXC;
441        m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
442    }
443    return m_state.GetError(e_regSetEXC, Read);
444}
445
446kern_return_t
447DNBArchImplX86_64::SetGPRState()
448{
449    kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
450    DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
451
452    m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
453    DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
454                      "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
455                      "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
456                      "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
457                      "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
458                      "\n\trip = %16.16llx"
459                      "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
460                      m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
461                      m_state.GetError(e_regSetGPR, Write),
462                      m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
463                      m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
464                      m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
465                      m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
466                      m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
467                      m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
468                      m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
469    return m_state.GetError(e_regSetGPR, Write);
470}
471
472kern_return_t
473DNBArchImplX86_64::SetFPUState()
474{
475    if (DEBUG_FPU_REGS)
476    {
477        m_state.SetError(e_regSetFPU, Write, 0);
478        return m_state.GetError(e_regSetFPU, Write);
479    }
480    else
481    {
482        if (CPUHasAVX() || FORCE_AVX_REGS)
483        {
484            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
485            return m_state.GetError(e_regSetFPU, Write);
486        }
487        else
488        {
489            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
490            return m_state.GetError(e_regSetFPU, Write);
491        }
492    }
493}
494
495kern_return_t
496DNBArchImplX86_64::SetEXCState()
497{
498    m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
499    return m_state.GetError(e_regSetEXC, Write);
500}
501
502kern_return_t
503DNBArchImplX86_64::GetDBGState(bool force)
504{
505    if (force || m_state.GetError(e_regSetDBG, Read))
506    {
507        mach_msg_type_number_t count = e_regSetWordSizeDBG;
508        m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
509    }
510    return m_state.GetError(e_regSetDBG, Read);
511}
512
513kern_return_t
514DNBArchImplX86_64::SetDBGState()
515{
516    m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
517    return m_state.GetError(e_regSetDBG, Write);
518}
519
520void
521DNBArchImplX86_64::ThreadWillResume()
522{
523    // Do we need to step this thread? If so, let the mach thread tell us so.
524    if (m_thread->IsStepping())
525    {
526        // This is the primary thread, let the arch do anything it needs
527        EnableHardwareSingleStep(true);
528    }
529
530    // Reset the debug status register, if necessary, before we resume.
531    kern_return_t kret = GetDBGState(false);
532    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
533    if (kret != KERN_SUCCESS)
534        return;
535
536    DBG &debug_state = m_state.context.dbg;
537    bool need_reset = false;
538    uint32_t i, num = NumSupportedHardwareWatchpoints();
539    for (i = 0; i < num; ++i)
540        if (IsWatchpointHit(debug_state, i))
541            need_reset = true;
542
543    if (need_reset)
544    {
545        ClearWatchpointHits(debug_state);
546        kret = SetDBGState();
547        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
548    }
549}
550
551bool
552DNBArchImplX86_64::ThreadDidStop()
553{
554    bool success = true;
555
556    m_state.InvalidateAllRegisterStates();
557
558    // Are we stepping a single instruction?
559    if (GetGPRState(true) == KERN_SUCCESS)
560    {
561        // We are single stepping, was this the primary thread?
562        if (m_thread->IsStepping())
563        {
564            // This was the primary thread, so we need to clear the
565            // trace bit.
566            success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
567        }
568        else
569        {
570            // The MachThread will automatically restore the suspend count
571            // in ThreadDidStop(), so we don't need to do anything here if
572            // we weren't the primary thread the last time
573        }
574    }
575    return success;
576}
577
578bool
579DNBArchImplX86_64::NotifyException(MachException::Data& exc)
580{
581    switch (exc.exc_type)
582    {
583        case EXC_BAD_ACCESS:
584            break;
585        case EXC_BAD_INSTRUCTION:
586            break;
587        case EXC_ARITHMETIC:
588            break;
589        case EXC_EMULATION:
590            break;
591        case EXC_SOFTWARE:
592            break;
593        case EXC_BREAKPOINT:
594            if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
595            {
596                // exc_code = EXC_I386_BPT
597                //
598                nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
599                if (pc != INVALID_NUB_ADDRESS && pc > 0)
600                {
601                    pc -= 1;
602                    // Check for a breakpoint at one byte prior to the current PC value
603                    // since the PC will be just past the trap.
604
605                    nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc);
606                    if (NUB_BREAK_ID_IS_VALID(breakID))
607                    {
608                        // Backup the PC for i386 since the trap was taken and the PC
609                        // is at the address following the single byte trap instruction.
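                        // For example, the breakpoint itself is the single
                        // byte 0xCC (int3) opcode: if it was planted at
                        // address A, the trap leaves rip at A + 1, so we
                        // rewind it to A before reporting the stop.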
610                        if (m_state.context.gpr.__rip > 0)
611                        {
612                            m_state.context.gpr.__rip = pc;
613                            // Write the new PC back out
614                            SetGPRState ();
615                        }
616                    }
617                    return true;
618                }
619            }
620            else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
621            {
622                // exc_code = EXC_I386_SGL
623                //
624                // Check whether this corresponds to a watchpoint hit event.
625                // If yes, set the exc_sub_code to the data break address.
626                nub_addr_t addr = 0;
627                uint32_t hw_index = GetHardwareWatchpointHit(addr);
628                if (hw_index != INVALID_NUB_HW_INDEX)
629                {
630                    exc.exc_data[1] = addr;
631                    // Piggyback the hw_index in the exc.data.
632                    exc.exc_data.push_back(hw_index);
633                }
634
635                return true;
636            }
637            break;
638        case EXC_SYSCALL:
639            break;
640        case EXC_MACH_SYSCALL:
641            break;
642        case EXC_RPC_ALERT:
643            break;
644    }
645    return false;
646}
647
648uint32_t
649DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
650{
651    // Available debug address registers: dr0, dr1, dr2, dr3.
652    return 4;
653}
654
655static uint32_t
656size_and_rw_bits(nub_size_t size, bool read, bool write)
657{
658    uint32_t rw;
659    if (read) {
660        rw = 0x3; // READ or READ/WRITE
661    } else if (write) {
662        rw = 0x1; // WRITE
663    } else {
664        assert(0 && "read and write cannot both be false");
665    }
666
667    switch (size) {
668    case 1:
669        return rw;
670    case 2:
671        return (0x1 << 2) | rw;
672    case 4:
673        return (0x3 << 2) | rw;
674    case 8:
675        return (0x2 << 2) | rw;
676    default:
677        assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
678    }
679}
680void
681DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
682{
683    // Set both dr7 (debug control register) and dri (debug address register).
684
685    // dr7{7-0} encodes the local/global enable bits:
686    //  global enable --. .-- local enable
687    //                  | |
688    //                  v v
689    //      dr0 -> bits{1-0}
690    //      dr1 -> bits{3-2}
691    //      dr2 -> bits{5-4}
692    //      dr3 -> bits{7-6}
693    //
694    // dr7{31-16} encodes the rw/len bits:
695    //  b_x+3, b_x+2, b_x+1, b_x
696    //      where bits{x+1, x} => rw
697    //            0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
698    //      and bits{x+3, x+2} => len
699    //            0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
700    //
701    //      dr0 -> bits{19-16}
702    //      dr1 -> bits{23-20}
703    //      dr2 -> bits{27-24}
704    //      dr3 -> bits{31-28}
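    //
    // For example (illustrative values), a 4 byte write watchpoint in
    // slot 1 (dr1) at address A ORs in:
    //     local enable bit:  1 << (2*1)                        == 0x00000004
    //     rw/len nibble:     ((0x3 << 2) | 0x1) << (16 + 4*1)  == 0x00D00000
    // and stores A into dr1 in the switch below.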
705    debug_state.__dr7 |= (1 << (2*hw_index) |
706                          size_and_rw_bits(size, read, write) << (16+4*hw_index));
707    switch (hw_index) {
708    case 0:
709        debug_state.__dr0 = addr; break;
710    case 1:
711        debug_state.__dr1 = addr; break;
712    case 2:
713        debug_state.__dr2 = addr; break;
714    case 3:
715        debug_state.__dr3 = addr; break;
716    default:
717        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
718    }
719    return;
720}
721
722void
723DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
724{
725    debug_state.__dr7 &= ~(3 << (2*hw_index));
726    switch (hw_index) {
727    case 0:
728        debug_state.__dr0 = 0; break;
729    case 1:
730        debug_state.__dr1 = 0; break;
731    case 2:
732        debug_state.__dr2 = 0; break;
733    case 3:
734        debug_state.__dr3 = 0; break;
735    default:
736        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
737    }
738    return;
739}
740
741bool
742DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
743{
744    // Check dr7 (debug control register) for local/global enable bits:
745    //  global enable --. .-- local enable
746    //                  | |
747    //                  v v
748    //      dr0 -> bits{1-0}
749    //      dr1 -> bits{3-2}
750    //      dr2 -> bits{5-4}
751    //      dr3 -> bits{7-6}
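    //
    // e.g. with hw_index == 2 this tests bits {5-4} of dr7; the slot is
    // vacant when (__dr7 & (3 << 4)), i.e. (__dr7 & 0x30), is zero.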
752    return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
753}
754
755// Resets the local copy of the debug status register to wait for the next debug exception.
756void
757DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state)
758{
759    // See also IsWatchpointHit().
760    debug_state.__dr6 = 0;
761    return;
762}
763
764bool
765DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
766{
767    // Check dr6 (debug status register) whether a watchpoint hits:
768    //          is watchpoint hit?
769    //                  |
770    //                  v
771    //      dr0 -> bits{0}
772    //      dr1 -> bits{1}
773    //      dr2 -> bits{2}
774    //      dr3 -> bits{3}
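    //
    // e.g. if __dr6 == 0x5, watchpoints 0 and 2 have both fired, since
    // (0x5 & (1 << 0)) and (0x5 & (1 << 2)) are both non-zero.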
775    return (debug_state.__dr6 & (1 << hw_index));
776}
777
778nub_addr_t
779DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
780{
781    switch (hw_index) {
782    case 0:
783        return debug_state.__dr0;
784    case 1:
785        return debug_state.__dr1;
786    case 2:
787        return debug_state.__dr2;
788    case 3:
789        return debug_state.__dr3;
790    default:
791        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
792    }
793}
794
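// StartTransForHWP(), RollbackTransForHWP() and FinishTransForHWP() implement
// a small two-phase-commit style protocol around changes to the debug
// registers: the current DBG state is checkpointed before a change and is
// either committed or rolled back afterwards by the transaction coordinator.
// An illustrative local sequence, matching EnableHardwareWatchpoint() below:
//
//     StartTransForHWP();                            // checkpoint context.dbg
//     SetWatchpoint(debug_state, i, addr, size, read, write);
//     if (SetDBGState() != KERN_SUCCESS)             // push to the inferior
//         m_state.context.dbg = GetDBGCheckpoint();  // restore on failure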
795bool
796DNBArchImplX86_64::StartTransForHWP()
797{
798    if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back)
799        DNBLogError ("%s inconsistent state detected, expected %d or %d, got: %d", __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state);
800    m_2pc_dbg_checkpoint = m_state.context.dbg;
801    m_2pc_trans_state = Trans_Pending;
802    return true;
803}
804bool
805DNBArchImplX86_64::RollbackTransForHWP()
806{
807    m_state.context.dbg = m_2pc_dbg_checkpoint;
808    if (m_2pc_trans_state != Trans_Pending)
809        DNBLogError ("%s inconsistent state detected, expected %d, got: %d", __FUNCTION__, Trans_Pending, m_2pc_trans_state);
810    m_2pc_trans_state = Trans_Rolled_Back;
811    kern_return_t kret = SetDBGState();
812    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret);
813
814    if (kret == KERN_SUCCESS)
815        return true;
816    else
817        return false;
818}
819bool
820DNBArchImplX86_64::FinishTransForHWP()
821{
822    m_2pc_trans_state = Trans_Done;
823    return true;
824}
825DNBArchImplX86_64::DBG
826DNBArchImplX86_64::GetDBGCheckpoint()
827{
828    return m_2pc_dbg_checkpoint;
829}
830
831uint32_t
832DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write)
833{
834    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = 0x%llx, size = %zu, read = %u, write = %u)", (uint64_t)addr, size, read, write);
835
836    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
837
838    // Can only watch 1, 2, 4, or 8 bytes.
839    if (!(size == 1 || size == 2 || size == 4 || size == 8))
840        return INVALID_NUB_HW_INDEX;
841
842    // We must watch for either read or write
843    if (read == false && write == false)
844        return INVALID_NUB_HW_INDEX;
845
846    // Read the debug state
847    kern_return_t kret = GetDBGState(false);
848
849    if (kret == KERN_SUCCESS)
850    {
851        // Check to make sure we have the needed hardware support
852        uint32_t i = 0;
853
854        DBG &debug_state = m_state.context.dbg;
855        for (i = 0; i < num_hw_watchpoints; ++i)
856        {
857            if (IsWatchpointVacant(debug_state, i))
858                break;
859        }
860
861        // See if we found an available hw watchpoint slot above
862        if (i < num_hw_watchpoints)
863        {
864            StartTransForHWP();
865
866            // Modify our local copy of the debug state, first.
867            SetWatchpoint(debug_state, i, addr, size, read, write);
868            // Now set the watch point in the inferior.
869            kret = SetDBGState();
870            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
871
872            if (kret == KERN_SUCCESS)
873                return i;
874            else // Revert to the previous debug state voluntarily.  The transaction coordinator knows that we have failed.
875                m_state.context.dbg = GetDBGCheckpoint();
876        }
877        else
878        {
879            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
880        }
881    }
882    return INVALID_NUB_HW_INDEX;
883}
884
885bool
886DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index)
887{
888    kern_return_t kret = GetDBGState(false);
889
890    const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
891    if (kret == KERN_SUCCESS)
892    {
893        DBG &debug_state = m_state.context.dbg;
894        if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
895        {
896            StartTransForHWP();
897
898            // Modify our local copy of the debug state, first.
899            ClearWatchpoint(debug_state, hw_index);
900            // Now disable the watch point in the inferior.
901            kret = SetDBGState();
902            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
903                             hw_index);
904
905            if (kret == KERN_SUCCESS)
906                return true;
907            else // Revert to the previous debug state voluntarily.  The transaction coordinator knows that we have failed.
908                m_state.context.dbg = GetDBGCheckpoint();
909        }
910    }
911    return false;
912}
913
914DNBArchImplX86_64::DBG DNBArchImplX86_64::Global_Debug_State = {0,0,0,0,0,0,0,0};
915bool DNBArchImplX86_64::Valid_Global_Debug_State = false;
916
917// Use this callback from MachThread, which in turn is called from MachThreadList, to update
918// the global view of the hardware watchpoint state, so that when a new thread comes along, it
919// inherits the existing hardware watchpoint state.
920void
921DNBArchImplX86_64::HardwareWatchpointStateChanged ()
922{
923    Global_Debug_State = m_state.context.dbg;
924    Valid_Global_Debug_State = true;
925}
926
927// Iterate through the debug status register; return the index of the first hit.
928uint32_t
929DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr)
930{
931    // Read the debug state
932    kern_return_t kret = GetDBGState(true);
933    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
934    if (kret == KERN_SUCCESS)
935    {
936        DBG &debug_state = m_state.context.dbg;
937        uint32_t i, num = NumSupportedHardwareWatchpoints();
938        for (i = 0; i < num; ++i)
939        {
940            if (IsWatchpointHit(debug_state, i))
941            {
942                addr = GetWatchAddress(debug_state, i);
943                DNBLogThreadedIf(LOG_WATCHPOINTS,
944                                 "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).",
945                                 i,
946                                 (uint64_t)addr);
947                return i;
948            }
949        }
950    }
951    return INVALID_NUB_HW_INDEX;
952}
953
954// Set the single step bit in the processor status register.
955kern_return_t
956DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
957{
958    if (GetGPRState(false) == KERN_SUCCESS)
959    {
960        const uint32_t trace_bit = 0x100u;
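        // Bit 8 of RFLAGS is the trap flag (TF); while it is set the CPU
        // raises a single-step debug exception after each instruction.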
961        if (enable)
962            m_state.context.gpr.__rflags |= trace_bit;
963        else
964            m_state.context.gpr.__rflags &= ~trace_bit;
965        return SetGPRState();
966    }
967    return m_state.GetError(e_regSetGPR, Read);
968}
969
970
971//----------------------------------------------------------------------
972// Register information definitions
973//----------------------------------------------------------------------
974
975enum
976{
977    gpr_rax = 0,
978    gpr_rbx,
979    gpr_rcx,
980    gpr_rdx,
981    gpr_rdi,
982    gpr_rsi,
983    gpr_rbp,
984    gpr_rsp,
985    gpr_r8,
986    gpr_r9,
987    gpr_r10,
988    gpr_r11,
989    gpr_r12,
990    gpr_r13,
991    gpr_r14,
992    gpr_r15,
993    gpr_rip,
994    gpr_rflags,
995    gpr_cs,
996    gpr_fs,
997    gpr_gs,
998    k_num_gpr_regs
999};
1000
1001enum {
1002    fpu_fcw,
1003    fpu_fsw,
1004    fpu_ftw,
1005    fpu_fop,
1006    fpu_ip,
1007    fpu_cs,
1008    fpu_dp,
1009    fpu_ds,
1010    fpu_mxcsr,
1011    fpu_mxcsrmask,
1012    fpu_stmm0,
1013    fpu_stmm1,
1014    fpu_stmm2,
1015    fpu_stmm3,
1016    fpu_stmm4,
1017    fpu_stmm5,
1018    fpu_stmm6,
1019    fpu_stmm7,
1020    fpu_xmm0,
1021    fpu_xmm1,
1022    fpu_xmm2,
1023    fpu_xmm3,
1024    fpu_xmm4,
1025    fpu_xmm5,
1026    fpu_xmm6,
1027    fpu_xmm7,
1028    fpu_xmm8,
1029    fpu_xmm9,
1030    fpu_xmm10,
1031    fpu_xmm11,
1032    fpu_xmm12,
1033    fpu_xmm13,
1034    fpu_xmm14,
1035    fpu_xmm15,
1036    fpu_ymm0,
1037    fpu_ymm1,
1038    fpu_ymm2,
1039    fpu_ymm3,
1040    fpu_ymm4,
1041    fpu_ymm5,
1042    fpu_ymm6,
1043    fpu_ymm7,
1044    fpu_ymm8,
1045    fpu_ymm9,
1046    fpu_ymm10,
1047    fpu_ymm11,
1048    fpu_ymm12,
1049    fpu_ymm13,
1050    fpu_ymm14,
1051    fpu_ymm15,
1052    k_num_fpu_regs,
1053
1054    // Aliases
1055    fpu_fctrl = fpu_fcw,
1056    fpu_fstat = fpu_fsw,
1057    fpu_ftag  = fpu_ftw,
1058    fpu_fiseg = fpu_cs,
1059    fpu_fioff = fpu_ip,
1060    fpu_foseg = fpu_ds,
1061    fpu_fooff = fpu_dp
1062};
1063
1064enum {
1065    exc_trapno,
1066    exc_err,
1067    exc_faultvaddr,
1068    k_num_exc_regs,
1069};
1070
1071
1072enum gcc_dwarf_regnums
1073{
1074    gcc_dwarf_rax = 0,
1075    gcc_dwarf_rdx = 1,
1076    gcc_dwarf_rcx = 2,
1077    gcc_dwarf_rbx = 3,
1078    gcc_dwarf_rsi = 4,
1079    gcc_dwarf_rdi = 5,
1080    gcc_dwarf_rbp = 6,
1081    gcc_dwarf_rsp = 7,
1082    gcc_dwarf_r8,
1083    gcc_dwarf_r9,
1084    gcc_dwarf_r10,
1085    gcc_dwarf_r11,
1086    gcc_dwarf_r12,
1087    gcc_dwarf_r13,
1088    gcc_dwarf_r14,
1089    gcc_dwarf_r15,
1090    gcc_dwarf_rip,
1091    gcc_dwarf_xmm0,
1092    gcc_dwarf_xmm1,
1093    gcc_dwarf_xmm2,
1094    gcc_dwarf_xmm3,
1095    gcc_dwarf_xmm4,
1096    gcc_dwarf_xmm5,
1097    gcc_dwarf_xmm6,
1098    gcc_dwarf_xmm7,
1099    gcc_dwarf_xmm8,
1100    gcc_dwarf_xmm9,
1101    gcc_dwarf_xmm10,
1102    gcc_dwarf_xmm11,
1103    gcc_dwarf_xmm12,
1104    gcc_dwarf_xmm13,
1105    gcc_dwarf_xmm14,
1106    gcc_dwarf_xmm15,
1107    gcc_dwarf_stmm0,
1108    gcc_dwarf_stmm1,
1109    gcc_dwarf_stmm2,
1110    gcc_dwarf_stmm3,
1111    gcc_dwarf_stmm4,
1112    gcc_dwarf_stmm5,
1113    gcc_dwarf_stmm6,
1114    gcc_dwarf_stmm7,
1115    gcc_dwarf_ymm0 = gcc_dwarf_xmm0,
1116    gcc_dwarf_ymm1 = gcc_dwarf_xmm1,
1117    gcc_dwarf_ymm2 = gcc_dwarf_xmm2,
1118    gcc_dwarf_ymm3 = gcc_dwarf_xmm3,
1119    gcc_dwarf_ymm4 = gcc_dwarf_xmm4,
1120    gcc_dwarf_ymm5 = gcc_dwarf_xmm5,
1121    gcc_dwarf_ymm6 = gcc_dwarf_xmm6,
1122    gcc_dwarf_ymm7 = gcc_dwarf_xmm7,
1123    gcc_dwarf_ymm8 = gcc_dwarf_xmm8,
1124    gcc_dwarf_ymm9 = gcc_dwarf_xmm9,
1125    gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
1126    gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
1127    gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
1128    gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
1129    gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
1130    gcc_dwarf_ymm15 = gcc_dwarf_xmm15
1131};
1132
1133enum gdb_regnums
1134{
1135    gdb_rax     =   0,
1136    gdb_rbx     =   1,
1137    gdb_rcx     =   2,
1138    gdb_rdx     =   3,
1139    gdb_rsi     =   4,
1140    gdb_rdi     =   5,
1141    gdb_rbp     =   6,
1142    gdb_rsp     =   7,
1143    gdb_r8      =   8,
1144    gdb_r9      =   9,
1145    gdb_r10     =  10,
1146    gdb_r11     =  11,
1147    gdb_r12     =  12,
1148    gdb_r13     =  13,
1149    gdb_r14     =  14,
1150    gdb_r15     =  15,
1151    gdb_rip     =  16,
1152    gdb_rflags  =  17,
1153    gdb_cs      =  18,
1154    gdb_ss      =  19,
1155    gdb_ds      =  20,
1156    gdb_es      =  21,
1157    gdb_fs      =  22,
1158    gdb_gs      =  23,
1159    gdb_stmm0   =  24,
1160    gdb_stmm1   =  25,
1161    gdb_stmm2   =  26,
1162    gdb_stmm3   =  27,
1163    gdb_stmm4   =  28,
1164    gdb_stmm5   =  29,
1165    gdb_stmm6   =  30,
1166    gdb_stmm7   =  31,
1167    gdb_fctrl   =  32,  gdb_fcw = gdb_fctrl,
1168    gdb_fstat   =  33,  gdb_fsw = gdb_fstat,
1169    gdb_ftag    =  34,  gdb_ftw = gdb_ftag,
1170    gdb_fiseg   =  35,  gdb_fpu_cs  = gdb_fiseg,
1171    gdb_fioff   =  36,  gdb_ip  = gdb_fioff,
1172    gdb_foseg   =  37,  gdb_fpu_ds  = gdb_foseg,
1173    gdb_fooff   =  38,  gdb_dp  = gdb_fooff,
1174    gdb_fop     =  39,
1175    gdb_xmm0    =  40,
1176    gdb_xmm1    =  41,
1177    gdb_xmm2    =  42,
1178    gdb_xmm3    =  43,
1179    gdb_xmm4    =  44,
1180    gdb_xmm5    =  45,
1181    gdb_xmm6    =  46,
1182    gdb_xmm7    =  47,
1183    gdb_xmm8    =  48,
1184    gdb_xmm9    =  49,
1185    gdb_xmm10   =  50,
1186    gdb_xmm11   =  51,
1187    gdb_xmm12   =  52,
1188    gdb_xmm13   =  53,
1189    gdb_xmm14   =  54,
1190    gdb_xmm15   =  55,
1191    gdb_mxcsr   =  56,
1192    gdb_ymm0    =  gdb_xmm0,
1193    gdb_ymm1    =  gdb_xmm1,
1194    gdb_ymm2    =  gdb_xmm2,
1195    gdb_ymm3    =  gdb_xmm3,
1196    gdb_ymm4    =  gdb_xmm4,
1197    gdb_ymm5    =  gdb_xmm5,
1198    gdb_ymm6    =  gdb_xmm6,
1199    gdb_ymm7    =  gdb_xmm7,
1200    gdb_ymm8    =  gdb_xmm8,
1201    gdb_ymm9    =  gdb_xmm9,
1202    gdb_ymm10   =  gdb_xmm10,
1203    gdb_ymm11   =  gdb_xmm11,
1204    gdb_ymm12   =  gdb_xmm12,
1205    gdb_ymm13   =  gdb_xmm13,
1206    gdb_ymm14   =  gdb_xmm14,
1207    gdb_ymm15   =  gdb_xmm15
1208};
1209
1210#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
1211#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
1212#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
1213#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg)     + offsetof (DNBArchImplX86_64::Context, exc))
1214
1215// This does not accurately identify the location of ymm0...7 in
1216// Context.fpu.avx, because Context.fpu.avx contains a bunch of padding
1217// that we don't need.  These offset macros lay out the register state
1218// that debugserver transmits to the debugger -- they are not used to
1219// interpret the thread_get_state info.
1220#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))
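// For example, in the transmitted layout ymm0's 32 bytes start immediately
// after xmm7's 16 bytes, so AVX_OFFSET_YMM(0) == AVX_OFFSET(xmm7) + 16 and
// AVX_OFFSET_YMM(1) == AVX_OFFSET(xmm7) + 16 + 32.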
1221
1222#define GPR_SIZE(reg)       (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
1223#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
1224#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
1225#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
1226#define FPU_SIZE_YMM(reg)   (32)
1227#define EXC_SIZE(reg)       (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1228
1229// These macros will auto define the register name, alt name, register size,
1230// register offset, encoding, format and native register. This ensures that
1231// the register state structures are defined correctly and have the correct
1232// sizes and offsets.
1233#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg }
1234#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg }
1235#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg }
1236#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg }
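// For example (illustrative expansion), DEFINE_GPR_ALT(rsp, "sp", GENERIC_REGNUM_SP)
// yields an entry named "rsp" with alt name "sp", size GPR_SIZE(rsp) (8 bytes),
// offset GPR_OFFSET(rsp), DWARF register number gcc_dwarf_rsp (7), generic
// register number GENERIC_REGNUM_SP, and gdb register number gdb_rsp (7).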
1237
1238// General purpose registers for 64 bit
1239const DNBRegisterInfo
1240DNBArchImplX86_64::g_gpr_registers[] =
1241{
1242    DEFINE_GPR      (rax),
1243    DEFINE_GPR      (rbx),
1244    DEFINE_GPR_ALT  (rcx , "arg4", GENERIC_REGNUM_ARG4),
1245    DEFINE_GPR_ALT  (rdx , "arg3", GENERIC_REGNUM_ARG3),
1246    DEFINE_GPR_ALT  (rdi , "arg1", GENERIC_REGNUM_ARG1),
1247    DEFINE_GPR_ALT  (rsi , "arg2", GENERIC_REGNUM_ARG2),
1248    DEFINE_GPR_ALT  (rbp , "fp"  , GENERIC_REGNUM_FP),
1249    DEFINE_GPR_ALT  (rsp , "sp"  , GENERIC_REGNUM_SP),
1250    DEFINE_GPR_ALT  (r8  , "arg5", GENERIC_REGNUM_ARG5),
1251    DEFINE_GPR_ALT  (r9  , "arg6", GENERIC_REGNUM_ARG6),
1252    DEFINE_GPR      (r10),
1253    DEFINE_GPR      (r11),
1254    DEFINE_GPR      (r12),
1255    DEFINE_GPR      (r13),
1256    DEFINE_GPR      (r14),
1257    DEFINE_GPR      (r15),
1258    DEFINE_GPR_ALT  (rip , "pc", GENERIC_REGNUM_PC),
1259    DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
1260    DEFINE_GPR_ALT2 (cs,        NULL),
1261    DEFINE_GPR_ALT2 (fs,        NULL),
1262    DEFINE_GPR_ALT2 (gs,        NULL),
1263};
1264
1265// Floating point registers 64 bit
1266const DNBRegisterInfo
1267DNBArchImplX86_64::g_fpu_registers_no_avx[] =
1268{
1269    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1U, -1U, -1U, -1U },
1270    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1U, -1U, -1U, -1U },
1271    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1U, -1U, -1U, -1U },
1272    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1U, -1U, -1U, -1U },
1273    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1U, -1U, -1U, -1U },
1274    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1U, -1U, -1U, -1U },
1275    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1U, -1U, -1U, -1U },
1276    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1U, -1U, -1U, -1U },
1277    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1U, -1U, -1U, -1U },
1278    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U },
1279
1280    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0 },
1281    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1 },
1282    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2 },
1283    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3 },
1284    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4 },
1285    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5 },
1286    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6 },
1287    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7 },
1288
1289    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 },
1290    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 },
1291    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 },
1292    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 },
1293    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 },
1294    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 },
1295    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 },
1296    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 },
1297    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8  },
1298    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9  },
1299    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10 },
1300    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11 },
1301    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12 },
1302    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13 },
1303    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14 },
1304    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15 },
1305};
1306
1307const DNBRegisterInfo
1308DNBArchImplX86_64::g_fpu_registers_avx[] =
1309{
1310    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1U, -1U, -1U, -1U },
1311    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1U, -1U, -1U, -1U },
1312    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1U, -1U, -1U, -1U },
1313    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1U, -1U, -1U, -1U },
1314    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1U, -1U, -1U, -1U },
1315    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1U, -1U, -1U, -1U },
1316    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1U, -1U, -1U, -1U },
1317    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1U, -1U, -1U, -1U },
1318    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1U, -1U, -1U, -1U },
1319    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U },
1320
1321    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0 },
1322    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1 },
1323    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2 },
1324    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3 },
1325    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4 },
1326    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5 },
1327    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6 },
1328    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7 },
1329
1330    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 },
1331    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 },
1332    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 },
1333    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 },
1334    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 },
1335    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 },
1336    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 },
1337    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 },
1338    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8  },
1339    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9  },
1340    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10 },
1341    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11 },
1342    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12 },
1343    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13 },
1344    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14 },
1345    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15 },
1346
1347    { e_regSetFPU, fpu_ymm0 , "ymm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)   , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1U, gdb_ymm0 },
1348    { e_regSetFPU, fpu_ymm1 , "ymm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)   , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1U, gdb_ymm1 },
1349    { e_regSetFPU, fpu_ymm2 , "ymm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)   , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1U, gdb_ymm2 },
1350    { e_regSetFPU, fpu_ymm3 , "ymm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)   , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1U, gdb_ymm3 },
1351    { e_regSetFPU, fpu_ymm4 , "ymm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)   , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1U, gdb_ymm4 },
1352    { e_regSetFPU, fpu_ymm5 , "ymm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)   , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1U, gdb_ymm5 },
1353    { e_regSetFPU, fpu_ymm6 , "ymm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)   , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1U, gdb_ymm6 },
1354    { e_regSetFPU, fpu_ymm7 , "ymm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)   , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1U, gdb_ymm7 },
1355    { e_regSetFPU, fpu_ymm8 , "ymm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)   , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1U, gdb_ymm8  },
1356    { e_regSetFPU, fpu_ymm9 , "ymm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)   , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1U, gdb_ymm9  },
1357    { e_regSetFPU, fpu_ymm10, "ymm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10)  , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1U, gdb_ymm10 },
1358    { e_regSetFPU, fpu_ymm11, "ymm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11)  , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1U, gdb_ymm11 },
1359    { e_regSetFPU, fpu_ymm12, "ymm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12)  , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1U, gdb_ymm12 },
1360    { e_regSetFPU, fpu_ymm13, "ymm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13)  , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1U, gdb_ymm13 },
1361    { e_regSetFPU, fpu_ymm14, "ymm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14)  , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1U, gdb_ymm14 },
1362    { e_regSetFPU, fpu_ymm15, "ymm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15)  , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1U, gdb_ymm15 }
1363};
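// Note on the ymm rows above: a 256-bit ymm value is not stored contiguously
// in the Mach AVX thread state, which is presumably why the table uses
// AVX_OFFSET_YMM(n) rather than a plain member offset. The low 128 bits of
// ymmN live in __fpu_xmmN and the high 128 bits in __fpu_ymmhN; the read and
// write paths in GetRegisterValue/SetRegisterValue below stitch the halves
// together.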
1364
1365// Exception registers
1366
1367const DNBRegisterInfo
1368DNBArchImplX86_64::g_exc_registers[] =
1369{
1370    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)       , -1U, -1U, -1U, -1U },
1371    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)          , -1U, -1U, -1U, -1U },
1372    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr)   , -1U, -1U, -1U, -1U }
1373};
1374
1375// Number of registers in each register set
1376const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
1377const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
1378const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
1379const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
1380const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
1381const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
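// A minimal sanity-check sketch (illustrative only, not part of this build):
// with C++11 the element counts derived above could be verified at compile
// time, e.g.
//
//   static_assert (sizeof(g_exc_registers)/sizeof(DNBRegisterInfo) == 3,
//                  "expected exactly trapno, err and faultvaddr");
//
// The sizeof/sizeof idiom keeps the k_num_* counts in sync with the tables
// automatically whenever a register entry is added or removed.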
1382
1383//----------------------------------------------------------------------
1384// Register set definitions. The first definition, at register set index
1385// zero, is for all registers, followed by the other register sets. The
1386// register information for the "all registers" set need not be filled in.
1387//----------------------------------------------------------------------
1388const DNBRegisterSetInfo
1389DNBArchImplX86_64::g_reg_sets_no_avx[] =
1390{
1391    { "x86_64 Registers",           NULL,               k_num_all_registers_no_avx },
1392    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1393    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
1394    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1395};
1396
1397const DNBRegisterSetInfo
1398DNBArchImplX86_64::g_reg_sets_avx[] =
1399{
1400    { "x86_64 Registers",           NULL,               k_num_all_registers_avx },
1401    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1402    { "Floating Point Registers",   g_fpu_registers_avx, k_num_fpu_registers_avx },
1403    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1404};
1405
1406// Total number of register sets for this architecture
1407const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);
1408
1409
1410DNBArchProtocol *
1411DNBArchImplX86_64::Create (MachThread *thread)
1412{
1413    DNBArchImplX86_64 *obj = new DNBArchImplX86_64 (thread);
1414
1415    // When a new thread comes along, it tries to inherit the global debug state, if that state is valid.
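    // (The global debug state holds the hardware debug-register setup used for
    // watchpoints, so watchpoints set before this thread existed are applied
    // to it as well.)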
1416    if (Valid_Global_Debug_State)
1417    {
1418        obj->m_state.context.dbg = Global_Debug_State;
1419        kern_return_t kret = obj->SetDBGState();
1420        DNBLogThreadedIf(LOG_WATCHPOINTS,
1421                         "DNBArchImplX86_64::Create() Inherit and SetDBGState() => 0x%8.8x.", kret);
1422    }
1423    return obj;
1424}
1425
1426const uint8_t * const
1427DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
1428{
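    // 0xCC is the single-byte INT3 instruction, the standard x86/x86_64
    // software breakpoint opcode.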
1429    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
1430    if (byte_size == 1)
1431        return g_breakpoint_opcode;
1432    return NULL;
1433}
1434
1435const DNBRegisterSetInfo *
1436DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
1437{
1438    *num_reg_sets = k_num_register_sets;
1439
1440    if (CPUHasAVX() || FORCE_AVX_REGS)
1441        return g_reg_sets_avx;
1442    else
1443        return g_reg_sets_no_avx;
1444}
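// A minimal caller-side sketch (illustrative only; the loop and any
// DNBRegisterSetInfo field names not visible in this file are assumptions):
//
//   nub_size_t num_sets = 0;
//   const DNBRegisterSetInfo *sets = DNBArchImplX86_64::GetRegisterSetInfo (&num_sets);
//   for (nub_size_t i = 1; i < num_sets; ++i)   // index 0 is the "all registers" set
//       DNBLogThreadedIf (LOG_THREAD, "set %zu: %s (%zu registers)",
//                         (size_t)i, sets[i].name, (size_t)sets[i].num_registers);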
1445
1446void
1447DNBArchImplX86_64::Initialize()
1448{
1449    DNBArchPluginInfo arch_plugin_info =
1450    {
1451        CPU_TYPE_X86_64,
1452        DNBArchImplX86_64::Create,
1453        DNBArchImplX86_64::GetRegisterSetInfo,
1454        DNBArchImplX86_64::SoftwareBreakpointOpcode
1455    };
1456
1457    // Register this arch plug-in with the main protocol class
1458    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
1459}
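// Initialize() is expected to run once at debugserver startup; after the
// plug-in is registered, DNBArchProtocol can route x86_64 threads to this
// implementation through the Create, GetRegisterSetInfo and
// SoftwareBreakpointOpcode callbacks supplied above. (The dispatch itself
// lives outside this file.)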
1460
1461bool
1462DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
1463{
1464    if (set == REGISTER_SET_GENERIC)
1465    {
1466        switch (reg)
1467        {
1468            case GENERIC_REGNUM_PC:     // Program Counter
1469                set = e_regSetGPR;
1470                reg = gpr_rip;
1471                break;
1472
1473            case GENERIC_REGNUM_SP:     // Stack Pointer
1474                set = e_regSetGPR;
1475                reg = gpr_rsp;
1476                break;
1477
1478            case GENERIC_REGNUM_FP:     // Frame Pointer
1479                set = e_regSetGPR;
1480                reg = gpr_rbp;
1481                break;
1482
1483            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1484                set = e_regSetGPR;
1485                reg = gpr_rflags;
1486                break;
1487
1488            case GENERIC_REGNUM_RA:     // Return Address
1489            default:
1490                return false;
1491        }
1492    }
1493
1494    if (GetRegisterState(set, false) != KERN_SUCCESS)
1495        return false;
1496
1497    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1498    if (regInfo)
1499    {
1500        value->info = *regInfo;
1501        switch (set)
1502        {
1503            case e_regSetGPR:
1504                if (reg < k_num_gpr_registers)
1505                {
1506                    value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
1507                    return true;
1508                }
1509                break;
1510
1511            case e_regSetFPU:
1512                if (CPUHasAVX() || FORCE_AVX_REGS)
1513                {
1514                    switch (reg)
1515                    {
1516                    case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
1517                    case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
1518                    case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
1519                    case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
1520                    case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
1521                    case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
1522                    case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
1523                    case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
1524                    case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
1525                    case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;
1526
1527                    case fpu_stmm0:
1528                    case fpu_stmm1:
1529                    case fpu_stmm2:
1530                    case fpu_stmm3:
1531                    case fpu_stmm4:
1532                    case fpu_stmm5:
1533                    case fpu_stmm6:
1534                    case fpu_stmm7:
1535                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1536                        return true;
1537
1538                    case fpu_xmm0:
1539                    case fpu_xmm1:
1540                    case fpu_xmm2:
1541                    case fpu_xmm3:
1542                    case fpu_xmm4:
1543                    case fpu_xmm5:
1544                    case fpu_xmm6:
1545                    case fpu_xmm7:
1546                    case fpu_xmm8:
1547                    case fpu_xmm9:
1548                    case fpu_xmm10:
1549                    case fpu_xmm11:
1550                    case fpu_xmm12:
1551                    case fpu_xmm13:
1552                    case fpu_xmm14:
1553                    case fpu_xmm15:
1554                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1555                        return true;
1556
1557                    case fpu_ymm0:
1558                    case fpu_ymm1:
1559                    case fpu_ymm2:
1560                    case fpu_ymm3:
1561                    case fpu_ymm4:
1562                    case fpu_ymm5:
1563                    case fpu_ymm6:
1564                    case fpu_ymm7:
1565                    case fpu_ymm8:
1566                    case fpu_ymm9:
1567                    case fpu_ymm10:
1568                    case fpu_ymm11:
1569                    case fpu_ymm12:
1570                    case fpu_ymm13:
1571                    case fpu_ymm14:
1572                    case fpu_ymm15:
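                        // A ymm register is returned as 32 bytes: the low 16
                        // bytes come from the matching __fpu_xmmN slot and the
                        // high 16 bytes from the matching __fpu_ymmhN slot.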
1573                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
1574                        memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
1575                        return true;
1576                    }
1577                }
1578                else
1579                {
1580                    switch (reg)
1581                    {
1582                        case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
1583                        case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
1584                        case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
1585                        case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
1586                        case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
1587                        case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
1588                        case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
1589                        case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
1590                        case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
1591                        case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;
1592
1593                        case fpu_stmm0:
1594                        case fpu_stmm1:
1595                        case fpu_stmm2:
1596                        case fpu_stmm3:
1597                        case fpu_stmm4:
1598                        case fpu_stmm5:
1599                        case fpu_stmm6:
1600                        case fpu_stmm7:
1601                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1602                            return true;
1603
1604                        case fpu_xmm0:
1605                        case fpu_xmm1:
1606                        case fpu_xmm2:
1607                        case fpu_xmm3:
1608                        case fpu_xmm4:
1609                        case fpu_xmm5:
1610                        case fpu_xmm6:
1611                        case fpu_xmm7:
1612                        case fpu_xmm8:
1613                        case fpu_xmm9:
1614                        case fpu_xmm10:
1615                        case fpu_xmm11:
1616                        case fpu_xmm12:
1617                        case fpu_xmm13:
1618                        case fpu_xmm14:
1619                        case fpu_xmm15:
1620                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1621                            return true;
1622                    }
1623                }
1624                break;
1625
1626            case e_regSetEXC:
1627                switch (reg)
1628                {
1629                case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno; return true;
1630                case exc_err:       value->value.uint32 = m_state.context.exc.__err; return true;
1631                case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
1632                }
1633                break;
1634        }
1635    }
1636    return false;
1637}
1638
1639
1640bool
1641DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
1642{
1643    if (set == REGISTER_SET_GENERIC)
1644    {
1645        switch (reg)
1646        {
1647            case GENERIC_REGNUM_PC:     // Program Counter
1648                set = e_regSetGPR;
1649                reg = gpr_rip;
1650                break;
1651
1652            case GENERIC_REGNUM_SP:     // Stack Pointer
1653                set = e_regSetGPR;
1654                reg = gpr_rsp;
1655                break;
1656
1657            case GENERIC_REGNUM_FP:     // Frame Pointer
1658                set = e_regSetGPR;
1659                reg = gpr_rbp;
1660                break;
1661
1662            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1663                set = e_regSetGPR;
1664                reg = gpr_rflags;
1665                break;
1666
1667            case GENERIC_REGNUM_RA:     // Return Address
1668            default:
1669                return false;
1670        }
1671    }
1672
1673    if (GetRegisterState(set, false) != KERN_SUCCESS)
1674        return false;
1675
1676    bool success = false;
1677    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1678    if (regInfo)
1679    {
1680        switch (set)
1681        {
1682            case e_regSetGPR:
1683                if (reg < k_num_gpr_registers)
1684                {
1685                    ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
1686                    success = true;
1687                }
1688                break;
1689
1690            case e_regSetFPU:
1691                if (CPUHasAVX() || FORCE_AVX_REGS)
1692                {
1693                    switch (reg)
1694                    {
1695                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1696                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1697                    case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1698                    case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
1699                    case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
1700                    case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
1701                    case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
1702                    case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
1703                    case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1704                    case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1705
1706                    case fpu_stmm0:
1707                    case fpu_stmm1:
1708                    case fpu_stmm2:
1709                    case fpu_stmm3:
1710                    case fpu_stmm4:
1711                    case fpu_stmm5:
1712                    case fpu_stmm6:
1713                    case fpu_stmm7:
1714                        memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1715                        success = true;
1716                        break;
1717
1718                    case fpu_xmm0:
1719                    case fpu_xmm1:
1720                    case fpu_xmm2:
1721                    case fpu_xmm3:
1722                    case fpu_xmm4:
1723                    case fpu_xmm5:
1724                    case fpu_xmm6:
1725                    case fpu_xmm7:
1726                    case fpu_xmm8:
1727                    case fpu_xmm9:
1728                    case fpu_xmm10:
1729                    case fpu_xmm11:
1730                    case fpu_xmm12:
1731                    case fpu_xmm13:
1732                    case fpu_xmm14:
1733                    case fpu_xmm15:
1734                        memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1735                        success = true;
1736                        break;
1737
1738                    case fpu_ymm0:
1739                    case fpu_ymm1:
1740                    case fpu_ymm2:
1741                    case fpu_ymm3:
1742                    case fpu_ymm4:
1743                    case fpu_ymm5:
1744                    case fpu_ymm6:
1745                    case fpu_ymm7:
1746                    case fpu_ymm8:
1747                    case fpu_ymm9:
1748                    case fpu_ymm10:
1749                    case fpu_ymm11:
1750                    case fpu_ymm12:
1751                    case fpu_ymm13:
1752                    case fpu_ymm14:
1753                    case fpu_ymm15:
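                        // Split the incoming 32-byte ymm value back into its
                        // __fpu_xmmN (low 16 bytes) and __fpu_ymmhN (high 16
                        // bytes) halves.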
1754                        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
1755                        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
1756                        success = true; break;
1757                    }
1758                }
1759                else
1760                {
1761                    switch (reg)
1762                    {
1763                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1764                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1765                    case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1766                    case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
1767                    case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
1768                    case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
1769                    case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
1770                    case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
1771                    case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1772                    case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1773
1774                    case fpu_stmm0:
1775                    case fpu_stmm1:
1776                    case fpu_stmm2:
1777                    case fpu_stmm3:
1778                    case fpu_stmm4:
1779                    case fpu_stmm5:
1780                    case fpu_stmm6:
1781                    case fpu_stmm7:
1782                        memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1783                        success = true;
1784                        break;
1785
1786                    case fpu_xmm0:
1787                    case fpu_xmm1:
1788                    case fpu_xmm2:
1789                    case fpu_xmm3:
1790                    case fpu_xmm4:
1791                    case fpu_xmm5:
1792                    case fpu_xmm6:
1793                    case fpu_xmm7:
1794                    case fpu_xmm8:
1795                    case fpu_xmm9:
1796                    case fpu_xmm10:
1797                    case fpu_xmm11:
1798                    case fpu_xmm12:
1799                    case fpu_xmm13:
1800                    case fpu_xmm14:
1801                    case fpu_xmm15:
1802                        memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1803                        success = true;
1804                        break;
1805                    }
1806                }
1807                break;
1808
1809            case e_regSetEXC:
1810                switch (reg)
1811            {
1812                {
1813                case exc_err:       m_state.context.exc.__err = value->value.uint32;        success = true; break;
1814                case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
1815            }
1816                }
1817        }
1818    }
1819
1820    if (success)
1821        return SetRegisterState(set) == KERN_SUCCESS;
1822    return false;
1823}
1824
1825
1826nub_size_t
1827DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
1828{
1829    nub_size_t size = sizeof (m_state.context);
1830
1831    if (buf && buf_len)
1832    {
1833        if (size > buf_len)
1834            size = buf_len;
1835
1836        bool force = false;
1837        kern_return_t kret;
1838        if ((kret = GetGPRState(force)) != KERN_SUCCESS)
1839        {
1840            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to read: %u ", buf, buf_len, kret);
1841            size = 0;
1842        }
1843        else
1844        if ((kret = GetFPUState(force)) != KERN_SUCCESS)
1845        {
1846            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: %s regs failed to read: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1847            size = 0;
1848        }
1849        else
1850        if ((kret = GetEXCState(force)) != KERN_SUCCESS)
1851        {
1852            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to read: %u", buf, buf_len, kret);
1853            size = 0;
1854        }
1855        else
1856        {
1857            // Success
1858            ::memcpy (buf, &m_state.context, size);
1859        }
1860    }
1861    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1862    // Return the size of the register context even if NULL was passed in
1863    return size;
1864}
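// A minimal caller-side sketch of the size-query-then-copy pattern implied by
// the NULL handling above (illustrative only; "arch" is assumed to be a
// DNBArchImplX86_64 owned by some MachThread):
//
//   nub_size_t needed = arch->GetRegisterContext (NULL, 0);
//   std::vector<uint8_t> buffer (needed);
//   nub_size_t copied = arch->GetRegisterContext (&buffer[0], buffer.size());
//   // "copied" equals "needed" unless one of the register reads failed.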
1865
1866nub_size_t
1867DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
1868{
1869    nub_size_t size = sizeof (m_state.context);
1870    if (buf == NULL || buf_len == 0)
1871        size = 0;
1872
1873    if (size)
1874    {
1875        if (size > buf_len)
1876            size = buf_len;
1877
1878        ::memcpy (&m_state.context, buf, size);
1879        kern_return_t kret;
1880        if ((kret = SetGPRState()) != KERN_SUCCESS)
1881            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to write: %u", buf, buf_len, kret);
1882        if ((kret = SetFPUState()) != KERN_SUCCESS)
1883            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: %s regs failed to write: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1884        if ((kret = SetEXCState()) != KERN_SUCCESS)
1885            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to write: %u", buf, buf_len, kret);
1886    }
1887    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1888    return size;
1889}
1890
1891
1892kern_return_t
1893DNBArchImplX86_64::GetRegisterState(int set, bool force)
1894{
1895    switch (set)
1896    {
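        // KERN_SUCCESS is 0, so for e_regSetALL the bitwise OR below yields
        // KERN_SUCCESS only if all three reads succeed; a nonzero result
        // indicates failure but is not itself a single meaningful error code.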
1897        case e_regSetALL:    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
1898        case e_regSetGPR:    return GetGPRState(force);
1899        case e_regSetFPU:    return GetFPUState(force);
1900        case e_regSetEXC:    return GetEXCState(force);
1901        default: break;
1902    }
1903    return KERN_INVALID_ARGUMENT;
1904}
1905
1906kern_return_t
1907DNBArchImplX86_64::SetRegisterState(int set)
1908{
1909    // Make sure we have a valid context to set.
1910    if (RegisterSetStateIsValid(set))
1911    {
1912        switch (set)
1913        {
1914            case e_regSetALL:    return SetGPRState() | SetFPUState() | SetEXCState();
1915            case e_regSetGPR:    return SetGPRState();
1916            case e_regSetFPU:    return SetFPUState();
1917            case e_regSetEXC:    return SetEXCState();
1918            default: break;
1919        }
1920    }
1921    return KERN_INVALID_ARGUMENT;
1922}
1923
1924bool
1925DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
1926{
1927    return m_state.RegsAreValid(set);
1928}
1929
1930
1931
1932#endif    // #if defined (__i386__) || defined (__x86_64__)
1933