1//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  Created by Greg Clayton on 6/25/07.
11//
12//===----------------------------------------------------------------------===//
13
14#if defined (__i386__) || defined (__x86_64__)
15
16#include <sys/cdefs.h>
17
18#include "MacOSX/x86_64/DNBArchImplX86_64.h"
19#include "DNBLog.h"
20#include "MachThread.h"
21#include "MachProcess.h"
22#include <mach/mach.h>
23#include <stdlib.h>
24
25#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
26enum debugState {
27    debugStateUnknown,
28    debugStateOff,
29    debugStateOn
30};
31
32static debugState sFPUDebugState = debugStateUnknown;
33static debugState sAVXForceState = debugStateUnknown;
34
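// These debug-only switches are read from the environment the first time they
// are queried: setting DNB_DEBUG_FPU_REGS substitutes the canned FPU values in
// GetFPUState() below, and DNB_DEBUG_X86_FORCE_AVX_REGS forces the AVX register
// layout even when CPUHasAVX() is false. In builds without
// LLDB_DEBUGSERVER_RELEASE or LLDB_DEBUGSERVER_DEBUG defined, both switches are
// compiled out to 0.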
35static bool DebugFPURegs ()
36{
37    if (sFPUDebugState == debugStateUnknown)
38    {
39        if (getenv("DNB_DEBUG_FPU_REGS"))
40            sFPUDebugState = debugStateOn;
41        else
42            sFPUDebugState = debugStateOff;
43    }
44
45    return (sFPUDebugState == debugStateOn);
46}
47
static bool ForceAVXRegs ()
{
    if (sAVXForceState == debugStateUnknown)
    {
        if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
            sAVXForceState = debugStateOn;
        else
            sAVXForceState = debugStateOff;
    }

    return (sAVXForceState == debugStateOn);
}
60
61#define DEBUG_FPU_REGS (DebugFPURegs())
62#define FORCE_AVX_REGS (ForceAVXRegs())
63#else
64#define DEBUG_FPU_REGS (0)
65#define FORCE_AVX_REGS (0)
66#endif
67
68enum DNBArchImplX86_64::AVXPresence DNBArchImplX86_64::s_has_avx = DNBArchImplX86_64::kAVXUnknown;
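// Cached AVX availability for this host; it stays kAVXUnknown until it is
// first determined.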
69
70uint64_t
71DNBArchImplX86_64::GetPC(uint64_t failValue)
72{
73    // Get program counter
74    if (GetGPRState(false) == KERN_SUCCESS)
75        return m_state.context.gpr.__rip;
76    return failValue;
77}
78
kern_return_t
DNBArchImplX86_64::SetPC(uint64_t value)
{
    // Read the current GPR state, then update the program counter.
    kern_return_t err = GetGPRState(false);
    if (err == KERN_SUCCESS)
    {
        m_state.context.gpr.__rip = value;
        err = SetGPRState();
    }
    return err == KERN_SUCCESS;
}
91
92uint64_t
93DNBArchImplX86_64::GetSP(uint64_t failValue)
94{
95    // Get stack pointer
96    if (GetGPRState(false) == KERN_SUCCESS)
97        return m_state.context.gpr.__rsp;
98    return failValue;
99}
100
101// Uncomment the value below to verify the values in the debugger.
102//#define DEBUG_GPR_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
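// With DEBUG_GPR_VALUES enabled, each register reads back as a two-character
// ASCII tag, e.g. __rax == ('a' << 8) + 'x' == 0x6178, so the canned values
// are easy to spot in the debugger.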
103
104kern_return_t
105DNBArchImplX86_64::GetGPRState(bool force)
106{
107    if (force || m_state.GetError(e_regSetGPR, Read))
108    {
109        kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
110        DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
111
112#if DEBUG_GPR_VALUES
113        m_state.context.gpr.__rax = ('a' << 8) + 'x';
114        m_state.context.gpr.__rbx = ('b' << 8) + 'x';
115        m_state.context.gpr.__rcx = ('c' << 8) + 'x';
116        m_state.context.gpr.__rdx = ('d' << 8) + 'x';
117        m_state.context.gpr.__rdi = ('d' << 8) + 'i';
118        m_state.context.gpr.__rsi = ('s' << 8) + 'i';
119        m_state.context.gpr.__rbp = ('b' << 8) + 'p';
120        m_state.context.gpr.__rsp = ('s' << 8) + 'p';
121        m_state.context.gpr.__r8  = ('r' << 8) + '8';
122        m_state.context.gpr.__r9  = ('r' << 8) + '9';
123        m_state.context.gpr.__r10 = ('r' << 8) + 'a';
124        m_state.context.gpr.__r11 = ('r' << 8) + 'b';
125        m_state.context.gpr.__r12 = ('r' << 8) + 'c';
126        m_state.context.gpr.__r13 = ('r' << 8) + 'd';
127        m_state.context.gpr.__r14 = ('r' << 8) + 'e';
128        m_state.context.gpr.__r15 = ('r' << 8) + 'f';
129        m_state.context.gpr.__rip = ('i' << 8) + 'p';
130        m_state.context.gpr.__rflags = ('f' << 8) + 'l';
131        m_state.context.gpr.__cs = ('c' << 8) + 's';
132        m_state.context.gpr.__fs = ('f' << 8) + 's';
133        m_state.context.gpr.__gs = ('g' << 8) + 's';
134        m_state.SetError(e_regSetGPR, Read, 0);
135#else
136        mach_msg_type_number_t count = e_regSetWordSizeGPR;
137        m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
138        DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
139                          "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
140                          "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
141                          "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
142                          "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
143                          "\n\trip = %16.16llx"
144                          "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
145                          m_thread->ThreadID(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT,
146                          m_state.GetError(e_regSetGPR, Read),
147                          m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
148                          m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
149                          m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
150                          m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
151                          m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
152                          m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
153                          m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);
154
155        //      DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
156        //                        "\n\trax = %16.16llx"
157        //                        "\n\trbx = %16.16llx"
158        //                        "\n\trcx = %16.16llx"
159        //                        "\n\trdx = %16.16llx"
160        //                        "\n\trdi = %16.16llx"
161        //                        "\n\trsi = %16.16llx"
162        //                        "\n\trbp = %16.16llx"
163        //                        "\n\trsp = %16.16llx"
164        //                        "\n\t r8 = %16.16llx"
165        //                        "\n\t r9 = %16.16llx"
166        //                        "\n\tr10 = %16.16llx"
167        //                        "\n\tr11 = %16.16llx"
168        //                        "\n\tr12 = %16.16llx"
169        //                        "\n\tr13 = %16.16llx"
170        //                        "\n\tr14 = %16.16llx"
171        //                        "\n\tr15 = %16.16llx"
172        //                        "\n\trip = %16.16llx"
173        //                        "\n\tflg = %16.16llx"
174        //                        "\n\t cs = %16.16llx"
175        //                        "\n\t fs = %16.16llx"
176        //                        "\n\t gs = %16.16llx",
177        //                        m_thread->ThreadID(),
178        //                        x86_THREAD_STATE64,
179        //                        x86_THREAD_STATE64_COUNT,
180        //                        m_state.GetError(e_regSetGPR, Read),
181        //                        m_state.context.gpr.__rax,
182        //                        m_state.context.gpr.__rbx,
183        //                        m_state.context.gpr.__rcx,
184        //                        m_state.context.gpr.__rdx,
185        //                        m_state.context.gpr.__rdi,
186        //                        m_state.context.gpr.__rsi,
187        //                        m_state.context.gpr.__rbp,
188        //                        m_state.context.gpr.__rsp,
189        //                        m_state.context.gpr.__r8,
190        //                        m_state.context.gpr.__r9,
191        //                        m_state.context.gpr.__r10,
192        //                        m_state.context.gpr.__r11,
193        //                        m_state.context.gpr.__r12,
194        //                        m_state.context.gpr.__r13,
195        //                        m_state.context.gpr.__r14,
196        //                        m_state.context.gpr.__r15,
197        //                        m_state.context.gpr.__rip,
198        //                        m_state.context.gpr.__rflags,
199        //                        m_state.context.gpr.__cs,
200        //                        m_state.context.gpr.__fs,
201        //                        m_state.context.gpr.__gs);
202#endif
203    }
204    return m_state.GetError(e_regSetGPR, Read);
205}
206
207// Uncomment the value below to verify the values in the debugger.
208//#define DEBUG_FPU_REGS 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
209
210kern_return_t
211DNBArchImplX86_64::GetFPUState(bool force)
212{
213    if (force || m_state.GetError(e_regSetFPU, Read))
214    {
215        if (DEBUG_FPU_REGS) {
216            if (CPUHasAVX() || FORCE_AVX_REGS)
217            {
218                m_state.context.fpu.avx.__fpu_reserved[0] = -1;
219                m_state.context.fpu.avx.__fpu_reserved[1] = -1;
220                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
221                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
222                m_state.context.fpu.avx.__fpu_ftw = 1;
223                m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
224                m_state.context.fpu.avx.__fpu_fop = 2;
225                m_state.context.fpu.avx.__fpu_ip = 3;
226                m_state.context.fpu.avx.__fpu_cs = 4;
227                m_state.context.fpu.avx.__fpu_rsrv2 = 5;
228                m_state.context.fpu.avx.__fpu_dp = 6;
229                m_state.context.fpu.avx.__fpu_ds = 7;
230                m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
231                m_state.context.fpu.avx.__fpu_mxcsr = 8;
232                m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
233                int i;
234                for (i=0; i<16; ++i)
235                {
236                    if (i<10)
237                    {
238                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
239                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
240                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
241                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
242                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
243                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
244                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
245                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
246                    }
247                    else
248                    {
249                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
250                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
251                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
252                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
253                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
254                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
255                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
256                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
257                    }
258
259                    m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
260                    m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
261                    m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
262                    m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
263                    m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
264                    m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
265                    m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
266                    m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
267                    m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8';
268                    m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9';
269                    m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A';
270                    m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B';
271                    m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C';
272                    m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D';
273                    m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E';
274                    m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F';
275
276                    m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
277                    m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
278                    m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
279                    m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
280                    m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
281                    m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
282                    m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
283                    m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
284                    m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8';
285                    m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9';
286                    m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A';
287                    m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B';
288                    m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C';
289                    m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D';
290                    m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E';
291                    m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F';
292                }
293                for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
294                    m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
295                m_state.context.fpu.avx.__fpu_reserved1 = -1;
296                for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
297                    m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
298                m_state.SetError(e_regSetFPU, Read, 0);
299            }
300            else
301            {
302                m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
303                m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
304                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
305                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
306                m_state.context.fpu.no_avx.__fpu_ftw = 1;
307                m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
308                m_state.context.fpu.no_avx.__fpu_fop = 2;
309                m_state.context.fpu.no_avx.__fpu_ip = 3;
310                m_state.context.fpu.no_avx.__fpu_cs = 4;
311                m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
312                m_state.context.fpu.no_avx.__fpu_dp = 6;
313                m_state.context.fpu.no_avx.__fpu_ds = 7;
314                m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
315                m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
316                m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
317                int i;
318                for (i=0; i<16; ++i)
319                {
320                    if (i<10)
321                    {
322                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
323                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
324                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
325                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
326                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
327                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
328                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
329                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
330                    }
331                    else
332                    {
333                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
334                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
335                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
336                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
337                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
338                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
339                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
340                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
341                    }
342
343                    m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
344                    m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
345                    m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
346                    m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
347                    m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
348                    m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
349                    m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
350                    m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
351                    m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
352                    m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
353                    m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
354                    m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
355                    m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
356                    m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
357                    m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
358                    m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
359                }
360                for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
361                    m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
362                m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
363                m_state.SetError(e_regSetFPU, Read, 0);
364            }
365        }
366        else
367        {
368            if (CPUHasAVX() || FORCE_AVX_REGS)
369            {
370                mach_msg_type_number_t count = e_regSetWordSizeAVX;
371                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
372            }
373            else
374            {
375                mach_msg_type_number_t count = e_regSetWordSizeFPR;
376                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
377            }
378        }
379    }
380    return m_state.GetError(e_regSetFPU, Read);
381}
382
383kern_return_t
384DNBArchImplX86_64::GetEXCState(bool force)
385{
386    if (force || m_state.GetError(e_regSetEXC, Read))
387    {
388        mach_msg_type_number_t count = e_regSetWordSizeEXC;
389        m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
390    }
391    return m_state.GetError(e_regSetEXC, Read);
392}
393
394kern_return_t
395DNBArchImplX86_64::SetGPRState()
396{
397    kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
398    DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
399
400    m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
401    DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
402                      "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
403                      "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
404                      "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
405                      "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
406                      "\n\trip = %16.16llx"
407                      "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
408                      m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
409                      m_state.GetError(e_regSetGPR, Write),
410                      m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
411                      m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
412                      m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
413                      m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
414                      m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
415                      m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
416                      m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
417    return m_state.GetError(e_regSetGPR, Write);
418}
419
420kern_return_t
421DNBArchImplX86_64::SetFPUState()
422{
423    if (DEBUG_FPU_REGS)
424    {
425        m_state.SetError(e_regSetFPU, Write, 0);
426        return m_state.GetError(e_regSetFPU, Write);
427    }
428    else
429    {
430        if (CPUHasAVX() || FORCE_AVX_REGS)
431        {
432            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
433            return m_state.GetError(e_regSetFPU, Write);
434        }
435        else
436        {
437            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPR));
438            return m_state.GetError(e_regSetFPU, Write);
439        }
440    }
441}
442
443kern_return_t
444DNBArchImplX86_64::SetEXCState()
445{
446    m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
447    return m_state.GetError(e_regSetEXC, Write);
448}
449
450kern_return_t
451DNBArchImplX86_64::GetDBGState(bool force)
452{
453    if (force || m_state.GetError(e_regSetDBG, Read))
454    {
455        mach_msg_type_number_t count = e_regSetWordSizeDBG;
456        m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
457    }
458    return m_state.GetError(e_regSetDBG, Read);
459}
460
461kern_return_t
462DNBArchImplX86_64::SetDBGState()
463{
464    m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
465    return m_state.GetError(e_regSetDBG, Write);
466}
467
468void
469DNBArchImplX86_64::ThreadWillResume()
470{
471    // Do we need to step this thread? If so, let the mach thread tell us so.
472    if (m_thread->IsStepping())
473    {
474        // This is the primary thread, let the arch do anything it needs
475        EnableHardwareSingleStep(true);
476    }
477
478    // Reset the debug status register before we resume.
479    kern_return_t kret = GetDBGState(false);
480    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
481    if (kret == KERN_SUCCESS)
482    {
483        DBG debug_state = m_state.context.dbg;
484        ClearWatchpointHits(debug_state);
485        kret = SetDBGState();
486        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
487    }
488}
489
490bool
491DNBArchImplX86_64::ThreadDidStop()
492{
493    bool success = true;
494
495    m_state.InvalidateAllRegisterStates();
496
497    // Are we stepping a single instruction?
498    if (GetGPRState(true) == KERN_SUCCESS)
499    {
500        // We are single stepping, was this the primary thread?
501        if (m_thread->IsStepping())
502        {
503            // This was the primary thread, we need to clear the trace
504            // bit if so.
505            success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
506        }
507        else
508        {
509            // The MachThread will automatically restore the suspend count
510            // in ThreadDidStop(), so we don't need to do anything here if
511            // we weren't the primary thread the last time
512        }
513    }
514    return success;
515}
516
517bool
518DNBArchImplX86_64::NotifyException(MachException::Data& exc)
519{
520    switch (exc.exc_type)
521    {
522        case EXC_BAD_ACCESS:
523            break;
524        case EXC_BAD_INSTRUCTION:
525            break;
526        case EXC_ARITHMETIC:
527            break;
528        case EXC_EMULATION:
529            break;
530        case EXC_SOFTWARE:
531            break;
532        case EXC_BREAKPOINT:
533            if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
534            {
535                nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
536                if (pc != INVALID_NUB_ADDRESS && pc > 0)
537                {
538                    pc -= 1;
539                    // Check for a breakpoint at one byte prior to the current PC value
540                    // since the PC will be just past the trap.
541
542                    nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc);
543                    if (NUB_BREAK_ID_IS_VALID(breakID))
544                    {
                        // Back up the PC since the trap was taken and the PC
                        // is at the address following the single-byte trap instruction.
547                        if (m_state.context.gpr.__rip > 0)
548                        {
549                            m_state.context.gpr.__rip = pc;
550                            // Write the new PC back out
551                            SetGPRState ();
552                        }
553                    }
554                    return true;
555                }
556            }
557            break;
558        case EXC_SYSCALL:
559            break;
560        case EXC_MACH_SYSCALL:
561            break;
562        case EXC_RPC_ALERT:
563            break;
564    }
565    return false;
566}
567
568uint32_t
569DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
570{
571    // Available debug address registers: dr0, dr1, dr2, dr3.
572    return 4;
573}
574
static uint32_t
size_and_rw_bits(nub_size_t size, bool read, bool write)
{
    uint32_t rw;
    if (read) {
        rw = 0x3; // READ or READ/WRITE
    } else if (write) {
        rw = 0x1; // WRITE
    } else {
        assert(0 && "read and write cannot both be false");
        rw = 0; // unreachable when asserts are enabled
    }

    switch (size) {
    case 1:
        return rw;
    case 2:
        return (0x1 << 2) | rw;
    case 4:
        return (0x3 << 2) | rw;
    case 8:
        return (0x2 << 2) | rw;
    default:
        assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
        return 0; // unreachable when asserts are enabled
    }
}
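
// For example, a 4-byte read/write watchpoint encodes as
// size_and_rw_bits(4, true, true) == (0x3 << 2) | 0x3 == 0xF, and a 1-byte
// write-only watchpoint as size_and_rw_bits(1, false, true) == 0x1.
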
void
DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
{
    // Set both dr7 (debug control register) and the corresponding drN (debug address register).

    // dr7{7-0} encodes the local/global enable bits:
    //  global enable --. .-- local enable
    //                  | |
    //                  v v
    //      dr0 -> bits{1-0}
    //      dr1 -> bits{3-2}
    //      dr2 -> bits{5-4}
    //      dr3 -> bits{7-6}
    //
    // dr7{31-16} encodes the rw/len bits:
    //  b_x+3, b_x+2, b_x+1, b_x
    //      where bits{x+1, x} => rw
    //            0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
    //      and bits{x+3, x+2} => len
    //            0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
    //
    //      dr0 -> bits{19-16}
    //      dr1 -> bits{23-20}
    //      dr2 -> bits{27-24}
    //      dr3 -> bits{31-28}
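    //
    // For example, a 4-byte read/write watchpoint in slot 1 (dr1) sets the
    // local-enable bit (bit 2) and writes rw/len = 0b1111 into bits 23-20:
    //      dr7 |= (1 << 2) | (0xF << 20)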
    debug_state.__dr7 |= (1 << (2*hw_index) |
                          size_and_rw_bits(size, read, write) << (16+4*hw_index));
    switch (hw_index) {
    case 0:
        debug_state.__dr0 = addr; break;
    case 1:
        debug_state.__dr1 = addr; break;
    case 2:
        debug_state.__dr2 = addr; break;
    case 3:
        debug_state.__dr3 = addr; break;
    default:
        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
    }
    return;
}
641
void
DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
{
    debug_state.__dr7 &= ~(3 << (2*hw_index));
    switch (hw_index) {
    case 0:
        debug_state.__dr0 = 0; break;
    case 1:
        debug_state.__dr1 = 0; break;
    case 2:
        debug_state.__dr2 = 0; break;
    case 3:
        debug_state.__dr3 = 0; break;
    default:
        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
    }
    return;
}
660
661bool
662DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
663{
664    // Check dr7 (debug control register) for local/global enable bits:
665    //  global enable --. .-- local enable
666    //                  | |
667    //                  v v
668    //      dr0 -> bits{1-0}
669    //      dr1 -> bits{3-2}
670    //      dr2 -> bits{5-4}
671    //      dr3 -> bits{7-6}
672    return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
673}
674
// Resets the local copy of the debug status register to wait for the next debug exception.
void
DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state)
{
    // See also IsWatchpointHit().
    debug_state.__dr6 = 0;
    return;
}
683
684bool
685DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
686{
687    // Check dr6 (debug status register) whether a watchpoint hits:
688    //          is watchpoint hit?
689    //                  |
690    //                  v
691    //      dr0 -> bits{0}
692    //      dr1 -> bits{1}
693    //      dr2 -> bits{2}
694    //      dr3 -> bits{3}
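    //
    // For example, dr6 == 0x4 means the watchpoint in slot 2 (dr2) was hit.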
695    return (debug_state.__dr6 & (1 << hw_index));
696}
697
698uint32_t
699DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write)
700{
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %llu, read = %u, write = %u)", (uint64_t)addr, (uint64_t)size, read, write);
702
703    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
704
705    // Can only watch 1, 2, 4, or 8 bytes.
706    if (!(size == 1 || size == 2 || size == 4 || size == 8))
707        return INVALID_NUB_HW_INDEX;
708
709    // We must watch for either read or write
710    if (read == false && write == false)
711        return INVALID_NUB_HW_INDEX;
712
713    // Read the debug state
714    kern_return_t kret = GetDBGState(false);
715
716    if (kret == KERN_SUCCESS)
717    {
718        // Check to make sure we have the needed hardware support
719        uint32_t i = 0;
720
721        DBG debug_state = m_state.context.dbg;
722        for (i = 0; i < num_hw_watchpoints; ++i)
723        {
724            if (IsWatchpointVacant(debug_state, i))
725                break;
726        }
727
728        // See if we found an available hw breakpoint slot above
729        if (i < num_hw_watchpoints)
730        {
731            // Modify our local copy of the debug state, first.
732            SetWatchpoint(debug_state, i, addr, size, read, write);
733            // Now set the watch point in the inferior.
734            kret = SetDBGState();
735            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
736
737            if (kret == KERN_SUCCESS)
738                return i;
739        }
740        else
741        {
742            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
743        }
744    }
745    return INVALID_NUB_HW_INDEX;
746}
747
748bool
749DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index)
750{
751    kern_return_t kret = GetDBGState(false);
752
753    const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
754    if (kret == KERN_SUCCESS)
755    {
756        DBG debug_state = m_state.context.dbg;
757        if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
758        {
759            // Modify our local copy of the debug state, first.
760            ClearWatchpoint(debug_state, hw_index);
761            // Now disable the watch point in the inferior.
762            kret = SetDBGState();
763            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
764                             hw_index);
765
766            if (kret == KERN_SUCCESS)
767                return true;
768        }
769    }
770    return false;
771}
772
773// Iterate through the debug status register; return the index of the first hit.
774uint32_t
775DNBArchImplX86_64::GetHardwareWatchpointHit()
776{
777    // Read the debug state
778    kern_return_t kret = GetDBGState(false);
779    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
780    if (kret == KERN_SUCCESS)
781    {
782        DBG debug_state = m_state.context.dbg;
783        uint32_t i, num = NumSupportedHardwareWatchpoints();
784        for (i = 0; i < num; ++i)
785        {
786            if (IsWatchpointHit(debug_state, i))
787            {
788                DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u.", i);
789                return i;
790            }
791        }
792    }
793    return INVALID_NUB_HW_INDEX;
794}
795
796// Set the single step bit in the processor status register.
797kern_return_t
798DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
799{
800    if (GetGPRState(false) == KERN_SUCCESS)
801    {
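        // 0x100 is the trap flag (TF), bit 8 of rflags: when set, the CPU
        // raises a single-step (debug) exception after the next instruction.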
802        const uint32_t trace_bit = 0x100u;
803        if (enable)
804            m_state.context.gpr.__rflags |= trace_bit;
805        else
806            m_state.context.gpr.__rflags &= ~trace_bit;
807        return SetGPRState();
808    }
809    return m_state.GetError(e_regSetGPR, Read);
810}
811
812
//----------------------------------------------------------------------
// Register information definitions
//----------------------------------------------------------------------
816
817enum
818{
819    gpr_rax = 0,
820    gpr_rbx,
821    gpr_rcx,
822    gpr_rdx,
823    gpr_rdi,
824    gpr_rsi,
825    gpr_rbp,
826    gpr_rsp,
827    gpr_r8,
828    gpr_r9,
829    gpr_r10,
830    gpr_r11,
831    gpr_r12,
832    gpr_r13,
833    gpr_r14,
834    gpr_r15,
835    gpr_rip,
836    gpr_rflags,
837    gpr_cs,
838    gpr_fs,
839    gpr_gs,
840    k_num_gpr_regs
841};
842
843enum {
844    fpu_fcw,
845    fpu_fsw,
846    fpu_ftw,
847    fpu_fop,
848    fpu_ip,
849    fpu_cs,
850    fpu_dp,
851    fpu_ds,
852    fpu_mxcsr,
853    fpu_mxcsrmask,
854    fpu_stmm0,
855    fpu_stmm1,
856    fpu_stmm2,
857    fpu_stmm3,
858    fpu_stmm4,
859    fpu_stmm5,
860    fpu_stmm6,
861    fpu_stmm7,
862    fpu_xmm0,
863    fpu_xmm1,
864    fpu_xmm2,
865    fpu_xmm3,
866    fpu_xmm4,
867    fpu_xmm5,
868    fpu_xmm6,
869    fpu_xmm7,
870    fpu_xmm8,
871    fpu_xmm9,
872    fpu_xmm10,
873    fpu_xmm11,
874    fpu_xmm12,
875    fpu_xmm13,
876    fpu_xmm14,
877    fpu_xmm15,
878    fpu_ymm0,
879    fpu_ymm1,
880    fpu_ymm2,
881    fpu_ymm3,
882    fpu_ymm4,
883    fpu_ymm5,
884    fpu_ymm6,
885    fpu_ymm7,
886    fpu_ymm8,
887    fpu_ymm9,
888    fpu_ymm10,
889    fpu_ymm11,
890    fpu_ymm12,
891    fpu_ymm13,
892    fpu_ymm14,
893    fpu_ymm15,
894    k_num_fpu_regs,
895
896    // Aliases
897    fpu_fctrl = fpu_fcw,
898    fpu_fstat = fpu_fsw,
899    fpu_ftag  = fpu_ftw,
900    fpu_fiseg = fpu_cs,
901    fpu_fioff = fpu_ip,
902    fpu_foseg = fpu_ds,
903    fpu_fooff = fpu_dp
904};
905
906enum {
907    exc_trapno,
908    exc_err,
909    exc_faultvaddr,
910    k_num_exc_regs,
911};
912
913
914enum gcc_dwarf_regnums
915{
916    gcc_dwarf_rax = 0,
917    gcc_dwarf_rdx = 1,
918    gcc_dwarf_rcx = 2,
919    gcc_dwarf_rbx = 3,
920    gcc_dwarf_rsi = 4,
921    gcc_dwarf_rdi = 5,
922    gcc_dwarf_rbp = 6,
923    gcc_dwarf_rsp = 7,
924    gcc_dwarf_r8,
925    gcc_dwarf_r9,
926    gcc_dwarf_r10,
927    gcc_dwarf_r11,
928    gcc_dwarf_r12,
929    gcc_dwarf_r13,
930    gcc_dwarf_r14,
931    gcc_dwarf_r15,
932    gcc_dwarf_rip,
933    gcc_dwarf_xmm0,
934    gcc_dwarf_xmm1,
935    gcc_dwarf_xmm2,
936    gcc_dwarf_xmm3,
937    gcc_dwarf_xmm4,
938    gcc_dwarf_xmm5,
939    gcc_dwarf_xmm6,
940    gcc_dwarf_xmm7,
941    gcc_dwarf_xmm8,
942    gcc_dwarf_xmm9,
943    gcc_dwarf_xmm10,
944    gcc_dwarf_xmm11,
945    gcc_dwarf_xmm12,
946    gcc_dwarf_xmm13,
947    gcc_dwarf_xmm14,
948    gcc_dwarf_xmm15,
949    gcc_dwarf_stmm0,
950    gcc_dwarf_stmm1,
951    gcc_dwarf_stmm2,
952    gcc_dwarf_stmm3,
953    gcc_dwarf_stmm4,
954    gcc_dwarf_stmm5,
955    gcc_dwarf_stmm6,
956    gcc_dwarf_stmm7,
957    gcc_dwarf_ymm0 = gcc_dwarf_xmm0,
958    gcc_dwarf_ymm1 = gcc_dwarf_xmm1,
959    gcc_dwarf_ymm2 = gcc_dwarf_xmm2,
960    gcc_dwarf_ymm3 = gcc_dwarf_xmm3,
961    gcc_dwarf_ymm4 = gcc_dwarf_xmm4,
962    gcc_dwarf_ymm5 = gcc_dwarf_xmm5,
963    gcc_dwarf_ymm6 = gcc_dwarf_xmm6,
964    gcc_dwarf_ymm7 = gcc_dwarf_xmm7,
965    gcc_dwarf_ymm8 = gcc_dwarf_xmm8,
966    gcc_dwarf_ymm9 = gcc_dwarf_xmm9,
967    gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
968    gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
969    gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
970    gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
971    gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
972    gcc_dwarf_ymm15 = gcc_dwarf_xmm15
973};
974
975enum gdb_regnums
976{
977    gdb_rax     =   0,
978    gdb_rbx     =   1,
979    gdb_rcx     =   2,
980    gdb_rdx     =   3,
981    gdb_rsi     =   4,
982    gdb_rdi     =   5,
983    gdb_rbp     =   6,
984    gdb_rsp     =   7,
985    gdb_r8      =   8,
986    gdb_r9      =   9,
987    gdb_r10     =  10,
988    gdb_r11     =  11,
989    gdb_r12     =  12,
990    gdb_r13     =  13,
991    gdb_r14     =  14,
992    gdb_r15     =  15,
993    gdb_rip     =  16,
994    gdb_rflags  =  17,
995    gdb_cs      =  18,
996    gdb_ss      =  19,
997    gdb_ds      =  20,
998    gdb_es      =  21,
999    gdb_fs      =  22,
1000    gdb_gs      =  23,
1001    gdb_stmm0   =  24,
1002    gdb_stmm1   =  25,
1003    gdb_stmm2   =  26,
1004    gdb_stmm3   =  27,
1005    gdb_stmm4   =  28,
1006    gdb_stmm5   =  29,
1007    gdb_stmm6   =  30,
1008    gdb_stmm7   =  31,
1009    gdb_fctrl   =  32,  gdb_fcw = gdb_fctrl,
1010    gdb_fstat   =  33,  gdb_fsw = gdb_fstat,
1011    gdb_ftag    =  34,  gdb_ftw = gdb_ftag,
1012    gdb_fiseg   =  35,  gdb_fpu_cs  = gdb_fiseg,
1013    gdb_fioff   =  36,  gdb_ip  = gdb_fioff,
1014    gdb_foseg   =  37,  gdb_fpu_ds  = gdb_foseg,
1015    gdb_fooff   =  38,  gdb_dp  = gdb_fooff,
1016    gdb_fop     =  39,
1017    gdb_xmm0    =  40,
1018    gdb_xmm1    =  41,
1019    gdb_xmm2    =  42,
1020    gdb_xmm3    =  43,
1021    gdb_xmm4    =  44,
1022    gdb_xmm5    =  45,
1023    gdb_xmm6    =  46,
1024    gdb_xmm7    =  47,
1025    gdb_xmm8    =  48,
1026    gdb_xmm9    =  49,
1027    gdb_xmm10   =  50,
1028    gdb_xmm11   =  51,
1029    gdb_xmm12   =  52,
1030    gdb_xmm13   =  53,
1031    gdb_xmm14   =  54,
1032    gdb_xmm15   =  55,
1033    gdb_mxcsr   =  56,
1034    gdb_ymm0    =  gdb_xmm0,
1035    gdb_ymm1    =  gdb_xmm1,
1036    gdb_ymm2    =  gdb_xmm2,
1037    gdb_ymm3    =  gdb_xmm3,
1038    gdb_ymm4    =  gdb_xmm4,
1039    gdb_ymm5    =  gdb_xmm5,
1040    gdb_ymm6    =  gdb_xmm6,
1041    gdb_ymm7    =  gdb_xmm7,
1042    gdb_ymm8    =  gdb_xmm8,
1043    gdb_ymm9    =  gdb_xmm9,
1044    gdb_ymm10   =  gdb_xmm10,
1045    gdb_ymm11   =  gdb_xmm11,
1046    gdb_ymm12   =  gdb_xmm12,
1047    gdb_ymm13   =  gdb_xmm13,
1048    gdb_ymm14   =  gdb_xmm14,
1049    gdb_ymm15   =  gdb_xmm15
1050};
1051
1052#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
1053#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
1054#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
1055#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg)     + offsetof (DNBArchImplX86_64::Context, exc))
1056
// This does not accurately identify the location of ymm0...7 in
// Context.fpu.avx, because Context.fpu.avx contains padding that we do not
// need to send.  These offset macros lay out the register state that
// debugserver transmits to the debugger -- they are not meant for
// interpreting the thread_get_state info.
1062#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))
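// For example, AVX_OFFSET_YMM(3) places ymm3 at AVX_OFFSET(xmm7) +
// FPU_SIZE_XMM(xmm7) + 96 bytes, i.e. the ymm registers are packed as
// consecutive 32-byte values starting right after xmm7 in the transmitted
// register data.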
1063
1064#define GPR_SIZE(reg)       (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
1065#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
1066#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
1067#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
1068#define FPU_SIZE_YMM(reg)   (32)
1069#define EXC_SIZE(reg)       (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1070
1071// These macros will auto define the register name, alt name, register size,
1072// register offset, encoding, format and native register. This ensures that
1073// the register state structures are defined correctly and have the correct
1074// sizes and offsets.
1075#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg }
1076#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg }
1077#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg }
1078#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg }
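
// For example, DEFINE_GPR_ALT(rsp, "sp", GENERIC_REGNUM_SP) produces an entry
// named "rsp" with the alternate name "sp", the size and offset of the __rsp
// member of the GPR structure, the matching compiler/DWARF register numbers,
// the generic stack-pointer register number, and the gdb register number
// gdb_rsp.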
1079
1080// General purpose registers for 64 bit
1081const DNBRegisterInfo
1082DNBArchImplX86_64::g_gpr_registers[] =
1083{
1084    DEFINE_GPR      (rax),
1085    DEFINE_GPR      (rbx),
1086    DEFINE_GPR_ALT  (rcx , "arg4", GENERIC_REGNUM_ARG4),
1087    DEFINE_GPR_ALT  (rdx , "arg3", GENERIC_REGNUM_ARG3),
1088    DEFINE_GPR_ALT  (rdi , "arg1", GENERIC_REGNUM_ARG1),
1089    DEFINE_GPR_ALT  (rsi , "arg2", GENERIC_REGNUM_ARG2),
1090    DEFINE_GPR_ALT  (rbp , "fp"  , GENERIC_REGNUM_FP),
1091    DEFINE_GPR_ALT  (rsp , "sp"  , GENERIC_REGNUM_SP),
1092    DEFINE_GPR_ALT  (r8  , "arg5", GENERIC_REGNUM_ARG5),
1093    DEFINE_GPR_ALT  (r9  , "arg6", GENERIC_REGNUM_ARG6),
1094    DEFINE_GPR      (r10),
1095    DEFINE_GPR      (r11),
1096    DEFINE_GPR      (r12),
1097    DEFINE_GPR      (r13),
1098    DEFINE_GPR      (r14),
1099    DEFINE_GPR      (r15),
1100    DEFINE_GPR_ALT  (rip , "pc", GENERIC_REGNUM_PC),
1101    DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
1102    DEFINE_GPR_ALT2 (cs,        NULL),
1103    DEFINE_GPR_ALT2 (fs,        NULL),
1104    DEFINE_GPR_ALT2 (gs,        NULL),
1105};
1106
1107// Floating point registers 64 bit
1108const DNBRegisterInfo
1109DNBArchImplX86_64::g_fpu_registers_no_avx[] =
1110{
1111    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1, -1, -1, -1 },
1112    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1, -1, -1, -1 },
1113    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1, -1, -1, -1 },
1114    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1, -1, -1, -1 },
1115    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1, -1, -1, -1 },
1116    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1, -1, -1, -1 },
1117    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1, -1, -1, -1 },
1118    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1, -1, -1, -1 },
1119    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1120    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1121
1122    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
1123    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
1124    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
1125    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
1126    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
1127    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
1128    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
1129    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },
1130
1131    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
1132    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
1133    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
1134    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
1135    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
1136    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
1137    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
1138    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
1139    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8  },
1140    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9  },
1141    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
1142    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
1143    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
1144    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
1145    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
1146    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
1147};
1148
1149const DNBRegisterInfo
1150DNBArchImplX86_64::g_fpu_registers_avx[] =
1151{
1152    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1, -1, -1, -1 },
1153    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1, -1, -1, -1 },
1154    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1, -1, -1, -1 },
1155    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1, -1, -1, -1 },
1156    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1, -1, -1, -1 },
1157    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1, -1, -1, -1 },
1158    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1, -1, -1, -1 },
1159    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1, -1, -1, -1 },
1160    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1161    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1162
1163    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
1164    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
1165    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
1166    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
1167    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
1168    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
1169    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
1170    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },
1171
1172    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
1173    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
1174    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
1175    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
1176    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
1177    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
1178    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
1179    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
1180    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8  },
1181    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9  },
1182    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
1183    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
1184    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
1185    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
1186    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
1187    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
1188
1189    { e_regSetFPU, fpu_ymm0 , "ymm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)   , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1, gdb_ymm0 },
1190    { e_regSetFPU, fpu_ymm1 , "ymm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)   , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1, gdb_ymm1 },
1191    { e_regSetFPU, fpu_ymm2 , "ymm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)   , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1, gdb_ymm2 },
1192    { e_regSetFPU, fpu_ymm3 , "ymm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)   , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1, gdb_ymm3 },
1193    { e_regSetFPU, fpu_ymm4 , "ymm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)   , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1, gdb_ymm4 },
1194    { e_regSetFPU, fpu_ymm5 , "ymm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)   , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1, gdb_ymm5 },
1195    { e_regSetFPU, fpu_ymm6 , "ymm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)   , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1, gdb_ymm6 },
1196    { e_regSetFPU, fpu_ymm7 , "ymm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)   , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1, gdb_ymm7 },
1197    { e_regSetFPU, fpu_ymm8 , "ymm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)   , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1, gdb_ymm8  },
1198    { e_regSetFPU, fpu_ymm9 , "ymm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)   , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1, gdb_ymm9  },
1199    { e_regSetFPU, fpu_ymm10, "ymm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10)  , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1, gdb_ymm10 },
1200    { e_regSetFPU, fpu_ymm11, "ymm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11)  , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1, gdb_ymm11 },
1201    { e_regSetFPU, fpu_ymm12, "ymm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12)  , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1, gdb_ymm12 },
1202    { e_regSetFPU, fpu_ymm13, "ymm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13)  , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1, gdb_ymm13 },
1203    { e_regSetFPU, fpu_ymm14, "ymm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14)  , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1, gdb_ymm14 },
1204    { e_regSetFPU, fpu_ymm15, "ymm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15)  , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1, gdb_ymm15 }
1205};
1206
1207// Exception registers
1208
1209const DNBRegisterInfo
1210DNBArchImplX86_64::g_exc_registers[] =
1211{
1212    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)       , -1, -1, -1, -1 },
1213    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)          , -1, -1, -1, -1 },
1214    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr)   , -1, -1, -1, -1 }
1215};
1216
1217// Number of registers in each register set
1218const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
1219const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
1220const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
1221const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
1222const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
1223const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
1224
1225//----------------------------------------------------------------------
1226// Register set definitions. The first definition, at register set index
1227// zero, covers all registers and is followed by the other register sets.
1228// The register information for the "all registers" set need not be filled in.
1229//----------------------------------------------------------------------
1230const DNBRegisterSetInfo
1231DNBArchImplX86_64::g_reg_sets_no_avx[] =
1232{
1233    { "x86_64 Registers",           NULL,               k_num_all_registers_no_avx },
1234    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1235    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
1236    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1237};
1238
1239const DNBRegisterSetInfo
1240DNBArchImplX86_64::g_reg_sets_avx[] =
1241{
1242    { "x86_64 Registers",           NULL,               k_num_all_registers_avx },
1243    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1244    { "Floating Point Registers",   g_fpu_registers_avx, k_num_fpu_registers_avx },
1245    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1246};
1247
1248// Total number of register sets for this architecture
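// (g_reg_sets_avx and g_reg_sets_no_avx contain the same number of entries, so
// either array can be used for the count below.)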
1249const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);
1250
1251
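// Factory callback handed to DNBArchProtocol in Initialize(); creates the
// per-thread x86_64 architecture implementation.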
1252DNBArchProtocol *
1253DNBArchImplX86_64::Create (MachThread *thread)
1254{
1255    return new DNBArchImplX86_64 (thread);
1256}
1257
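// Return the 1-byte INT3 opcode (0xCC) used as the software breakpoint
// instruction; only byte_size == 1 is supported on x86_64.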
1258const uint8_t * const
1259DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
1260{
1261    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
1262    if (byte_size == 1)
1263        return g_breakpoint_opcode;
1264    return NULL;
1265}
1266
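// Hand back the register set tables for this machine: the AVX flavor when the
// CPU supports AVX (or when the FORCE_AVX_REGS debug override is on), otherwise
// the SSE-only flavor.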
1267const DNBRegisterSetInfo *
1268DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
1269{
1270    *num_reg_sets = k_num_register_sets;
1271
1272    if (CPUHasAVX() || FORCE_AVX_REGS)
1273        return g_reg_sets_avx;
1274    else
1275        return g_reg_sets_no_avx;
1276}
1277
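// Register this implementation's entry points (factory, register set info,
// breakpoint opcode) with DNBArchProtocol for CPU_TYPE_X86_64.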
1278void
1279DNBArchImplX86_64::Initialize()
1280{
1281    DNBArchPluginInfo arch_plugin_info =
1282    {
1283        CPU_TYPE_X86_64,
1284        DNBArchImplX86_64::Create,
1285        DNBArchImplX86_64::GetRegisterSetInfo,
1286        DNBArchImplX86_64::SoftwareBreakpointOpcode
1287    };
1288
1289    // Register this arch plug-in with the main protocol class
1290    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
1291}
1292
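// Read one register out of the cached thread state, refreshing the owning
// register set first if it has not been read yet. Generic register numbers
// (PC, SP, FP, FLAGS) are remapped to their x86_64 GPR equivalents; the
// generic return-address register has no direct x86_64 counterpart and fails.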
1293bool
1294DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
1295{
1296    if (set == REGISTER_SET_GENERIC)
1297    {
1298        switch (reg)
1299        {
1300            case GENERIC_REGNUM_PC:     // Program Counter
1301                set = e_regSetGPR;
1302                reg = gpr_rip;
1303                break;
1304
1305            case GENERIC_REGNUM_SP:     // Stack Pointer
1306                set = e_regSetGPR;
1307                reg = gpr_rsp;
1308                break;
1309
1310            case GENERIC_REGNUM_FP:     // Frame Pointer
1311                set = e_regSetGPR;
1312                reg = gpr_rbp;
1313                break;
1314
1315            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1316                set = e_regSetGPR;
1317                reg = gpr_rflags;
1318                break;
1319
1320            case GENERIC_REGNUM_RA:     // Return Address
1321            default:
1322                return false;
1323        }
1324    }
1325
1326    if (GetRegisterState(set, false) != KERN_SUCCESS)
1327        return false;
1328
1329    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1330    if (regInfo)
1331    {
1332        value->info = *regInfo;
1333        switch (set)
1334        {
1335            case e_regSetGPR:
1336                if (reg < k_num_gpr_registers)
1337                {
1338                    value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
1339                    return true;
1340                }
1341                break;
1342
1343            case e_regSetFPU:
1344                if (CPUHasAVX() || FORCE_AVX_REGS)
1345                {
1346                    switch (reg)
1347                    {
1348                    case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
1349                    case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
1350                    case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
1351                    case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
1352                    case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
1353                    case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
1354                    case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
1355                    case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
1356                    case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
1357                    case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;
1358
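                    // Each __fpu_stmmN slot is a 16-byte structure whose first 10 bytes hold
                    // the 80-bit x87/MMX value, so the pointer arithmetic below steps in whole
                    // 16-byte slots while the copy takes just 10 bytes.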
1359                    case fpu_stmm0:
1360                    case fpu_stmm1:
1361                    case fpu_stmm2:
1362                    case fpu_stmm3:
1363                    case fpu_stmm4:
1364                    case fpu_stmm5:
1365                    case fpu_stmm6:
1366                    case fpu_stmm7:
1367                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1368                        return true;
1369
1370                    case fpu_xmm0:
1371                    case fpu_xmm1:
1372                    case fpu_xmm2:
1373                    case fpu_xmm3:
1374                    case fpu_xmm4:
1375                    case fpu_xmm5:
1376                    case fpu_xmm6:
1377                    case fpu_xmm7:
1378                    case fpu_xmm8:
1379                    case fpu_xmm9:
1380                    case fpu_xmm10:
1381                    case fpu_xmm11:
1382                    case fpu_xmm12:
1383                    case fpu_xmm13:
1384                    case fpu_xmm14:
1385                    case fpu_xmm15:
1386                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1387                        return true;
1388
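                    // A 32-byte ymm value is assembled from two 16-byte halves: the low half
                    // from the matching __fpu_xmmN slot and the high half from __fpu_ymmhN.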
1389                    case fpu_ymm0:
1390                    case fpu_ymm1:
1391                    case fpu_ymm2:
1392                    case fpu_ymm3:
1393                    case fpu_ymm4:
1394                    case fpu_ymm5:
1395                    case fpu_ymm6:
1396                    case fpu_ymm7:
1397                    case fpu_ymm8:
1398                    case fpu_ymm9:
1399                    case fpu_ymm10:
1400                    case fpu_ymm11:
1401                    case fpu_ymm12:
1402                    case fpu_ymm13:
1403                    case fpu_ymm14:
1404                    case fpu_ymm15:
1405                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
1406                        memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
1407                        return true;
1408                    }
1409                }
1410                else
1411                {
1412                    switch (reg)
1413                    {
1414                        case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
1415                        case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
1416                        case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
1417                        case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
1418                        case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
1419                        case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
1420                        case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
1421                        case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
1422                        case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
1423                        case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;
1424
1425                        case fpu_stmm0:
1426                        case fpu_stmm1:
1427                        case fpu_stmm2:
1428                        case fpu_stmm3:
1429                        case fpu_stmm4:
1430                        case fpu_stmm5:
1431                        case fpu_stmm6:
1432                        case fpu_stmm7:
1433                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1434                            return true;
1435
1436                        case fpu_xmm0:
1437                        case fpu_xmm1:
1438                        case fpu_xmm2:
1439                        case fpu_xmm3:
1440                        case fpu_xmm4:
1441                        case fpu_xmm5:
1442                        case fpu_xmm6:
1443                        case fpu_xmm7:
1444                        case fpu_xmm8:
1445                        case fpu_xmm9:
1446                        case fpu_xmm10:
1447                        case fpu_xmm11:
1448                        case fpu_xmm12:
1449                        case fpu_xmm13:
1450                        case fpu_xmm14:
1451                        case fpu_xmm15:
1452                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1453                            return true;
1454                    }
1455                }
1456                break;
1457
1458            case e_regSetEXC:
1459                switch (reg)
1460                {
1461                case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno; return true;
1462                case exc_err:       value->value.uint32 = m_state.context.exc.__err; return true;
1463                case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
1464                }
1465                break;
1466        }
1467    }
1468    return false;
1469}
1470
1471
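// Write one register into the cached state and, on success, flush the owning
// register set back to the thread via SetRegisterState(). Uses the same
// generic-to-GPR remapping as GetRegisterValue().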
1472bool
1473DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
1474{
1475    if (set == REGISTER_SET_GENERIC)
1476    {
1477        switch (reg)
1478        {
1479            case GENERIC_REGNUM_PC:     // Program Counter
1480                set = e_regSetGPR;
1481                reg = gpr_rip;
1482                break;
1483
1484            case GENERIC_REGNUM_SP:     // Stack Pointer
1485                set = e_regSetGPR;
1486                reg = gpr_rsp;
1487                break;
1488
1489            case GENERIC_REGNUM_FP:     // Frame Pointer
1490                set = e_regSetGPR;
1491                reg = gpr_rbp;
1492                break;
1493
1494            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1495                set = e_regSetGPR;
1496                reg = gpr_rflags;
1497                break;
1498
1499            case GENERIC_REGNUM_RA:     // Return Address
1500            default:
1501                return false;
1502        }
1503    }
1504
1505    if (GetRegisterState(set, false) != KERN_SUCCESS)
1506        return false;
1507
1508    bool success = false;
1509    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1510    if (regInfo)
1511    {
1512        switch (set)
1513        {
1514            case e_regSetGPR:
1515                if (reg < k_num_gpr_registers)
1516                {
1517                    ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
1518                    success = true;
1519                }
1520                break;
1521
1522            case e_regSetFPU:
1523                if (CPUHasAVX() || FORCE_AVX_REGS)
1524                {
1525                    switch (reg)
1526                    {
1527                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1528                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1529                    case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1530                    case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
1531                    case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
1532                    case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
1533                    case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
1534                    case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
1535                    case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1536                    case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1537
1538                    case fpu_stmm0:
1539                    case fpu_stmm1:
1540                    case fpu_stmm2:
1541                    case fpu_stmm3:
1542                    case fpu_stmm4:
1543                    case fpu_stmm5:
1544                    case fpu_stmm6:
1545                    case fpu_stmm7:
1546                        memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1547                        success = true;
1548                        break;
1549
1550                    case fpu_xmm0:
1551                    case fpu_xmm1:
1552                    case fpu_xmm2:
1553                    case fpu_xmm3:
1554                    case fpu_xmm4:
1555                    case fpu_xmm5:
1556                    case fpu_xmm6:
1557                    case fpu_xmm7:
1558                    case fpu_xmm8:
1559                    case fpu_xmm9:
1560                    case fpu_xmm10:
1561                    case fpu_xmm11:
1562                    case fpu_xmm12:
1563                    case fpu_xmm13:
1564                    case fpu_xmm14:
1565                    case fpu_xmm15:
1566                        memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1567                        success = true;
1568                        break;
1569
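                    // Split the incoming 32-byte ymm value into its two halves: the low 16
                    // bytes go into the matching __fpu_xmmN slot, the high 16 bytes into __fpu_ymmhN.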
1570                    case fpu_ymm0:
1571                    case fpu_ymm1:
1572                    case fpu_ymm2:
1573                    case fpu_ymm3:
1574                    case fpu_ymm4:
1575                    case fpu_ymm5:
1576                    case fpu_ymm6:
1577                    case fpu_ymm7:
1578                    case fpu_ymm8:
1579                    case fpu_ymm9:
1580                    case fpu_ymm10:
1581                    case fpu_ymm11:
1582                    case fpu_ymm12:
1583                    case fpu_ymm13:
1584                    case fpu_ymm14:
1585                    case fpu_ymm15:
1586                        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
1587                        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
1588                        // Set success and break (rather than returning) so the change is
                        // flushed to the thread by SetRegisterState() below, as in the other cases.
                        success = true;
                        break;
1589                    }
1590                }
1591                else
1592                {
1593                    switch (reg)
1594                    {
1595                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1596                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1597                    case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1598                    case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
1599                    case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
1600                    case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
1601                    case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
1602                    case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
1603                    case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1604                    case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1605
1606                    case fpu_stmm0:
1607                    case fpu_stmm1:
1608                    case fpu_stmm2:
1609                    case fpu_stmm3:
1610                    case fpu_stmm4:
1611                    case fpu_stmm5:
1612                    case fpu_stmm6:
1613                    case fpu_stmm7:
1614                        memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1615                        success = true;
1616                        break;
1617
1618                    case fpu_xmm0:
1619                    case fpu_xmm1:
1620                    case fpu_xmm2:
1621                    case fpu_xmm3:
1622                    case fpu_xmm4:
1623                    case fpu_xmm5:
1624                    case fpu_xmm6:
1625                    case fpu_xmm7:
1626                    case fpu_xmm8:
1627                    case fpu_xmm9:
1628                    case fpu_xmm10:
1629                    case fpu_xmm11:
1630                    case fpu_xmm12:
1631                    case fpu_xmm13:
1632                    case fpu_xmm14:
1633                    case fpu_xmm15:
1634                        memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1635                        success = true;
1636                        break;
1637                    }
1638                }
1639                break;
1640
1641            case e_regSetEXC:
1642                switch (reg)
1643                {
1644                case exc_trapno:    m_state.context.exc.__trapno = value->value.uint32;     success = true; break;
1645                case exc_err:       m_state.context.exc.__err = value->value.uint32;        success = true; break;
1646                case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
1647                }
1648                break;
1649        }
1650    }
1651
1652    if (success)
1653        return SetRegisterState(set) == KERN_SUCCESS;
1654    return false;
1655}
1656
1657
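// Copy the whole cached register context (GPR, FPU, EXC) into buf after
// refreshing it from the thread. The full context size is returned even when
// buf is NULL so callers can size their buffers first.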
1658nub_size_t
1659DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
1660{
1661    nub_size_t size = sizeof (m_state.context);
1662
1663    if (buf && buf_len)
1664    {
1665        if (size > buf_len)
1666            size = buf_len;
1667
1668        bool force = false;
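        // KERN_SUCCESS is 0, so the bitwise OR below is non-zero if any of the
        // three reads fails; unlike ||, it does not short-circuit, so all three
        // register sets are read regardless.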
1669        if (GetGPRState(force) | GetFPUState(force) | GetEXCState(force))
1670            return 0;
1671        ::memcpy (buf, &m_state.context, size);
1672    }
1673    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1674    // Return the size of the register context even if NULL was passed in
1675    return size;
1676}
1677
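// Overwrite the cached register context from buf and write all three register
// sets back to the thread; returns the number of bytes consumed.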
1678nub_size_t
1679DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
1680{
1681    nub_size_t size = sizeof (m_state.context);
1682    if (buf == NULL || buf_len == 0)
1683        size = 0;
1684
1685    if (size)
1686    {
1687        if (size > buf_len)
1688            size = buf_len;
1689
1690        ::memcpy (&m_state.context, buf, size);
1691        SetGPRState();
1692        SetFPUState();
1693        SetEXCState();
1694    }
1695    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1696    return size;
1697}
1698
1699
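// Read the requested register set(s) from the thread into the cached context.
// KERN_SUCCESS is 0, so for e_regSetALL the three results are bitwise-ORed:
// the call reports success only when every read succeeds (any failure produces
// a non-zero value, though not necessarily a single meaningful error code).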
1700kern_return_t
1701DNBArchImplX86_64::GetRegisterState(int set, bool force)
1702{
1703    switch (set)
1704    {
1705        case e_regSetALL:    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
1706        case e_regSetGPR:    return GetGPRState(force);
1707        case e_regSetFPU:    return GetFPUState(force);
1708        case e_regSetEXC:    return GetEXCState(force);
1709        default: break;
1710    }
1711    return KERN_INVALID_ARGUMENT;
1712}
1713
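// Write the requested register set(s) from the cached context back to the
// thread, but only if that cached state is valid; e_regSetALL combines the
// results with the same bitwise-OR convention as GetRegisterState().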
1714kern_return_t
1715DNBArchImplX86_64::SetRegisterState(int set)
1716{
1717    // Make sure we have a valid context to set.
1718    if (RegisterSetStateIsValid(set))
1719    {
1720        switch (set)
1721        {
1722            case e_regSetALL:    return SetGPRState() | SetFPUState() | SetEXCState();
1723            case e_regSetGPR:    return SetGPRState();
1724            case e_regSetFPU:    return SetFPUState();
1725            case e_regSetEXC:    return SetEXCState();
1726            default: break;
1727        }
1728    }
1729    return KERN_INVALID_ARGUMENT;
1730}
1731
1732bool
1733DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
1734{
1735    return m_state.RegsAreValid(set);
1736}
1737
1738
1739
1740#endif    // #if defined (__i386__) || defined (__x86_64__)
1741