DNBArchImplX86_64.cpp revision d1119941f77f56d9660d1666c47df8f24e4408e2
1//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  Created by Greg Clayton on 6/25/07.
11//
12//===----------------------------------------------------------------------===//
13
14#if defined (__i386__) || defined (__x86_64__)
15
16#include <sys/cdefs.h>
17
18#include "MacOSX/x86_64/DNBArchImplX86_64.h"
19#include "DNBLog.h"
20#include "MachThread.h"
21#include "MachProcess.h"
22#include <mach/mach.h>
23#include <stdlib.h>
24
25#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
26enum debugState {
27    debugStateUnknown,
28    debugStateOff,
29    debugStateOn
30};
31
32static debugState sFPUDebugState = debugStateUnknown;
33static debugState sAVXForceState = debugStateUnknown;
34
35static bool DebugFPURegs ()
36{
37    if (sFPUDebugState == debugStateUnknown)
38    {
39        if (getenv("DNB_DEBUG_FPU_REGS"))
40            sFPUDebugState = debugStateOn;
41        else
42            sFPUDebugState = debugStateOff;
43    }
44
45    return (sFPUDebugState == debugStateOn);
46}
47
48static bool ForceAVXRegs ()
49{
50    if (sAVXForceState == debugStateUnknown)
51    {
52        if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
53            sAVXForceState = debugStateOn;
54        else
55            sAVXForceState = debugStateOff;
56    }
57
58    return (sAVXForceState == debugStateOn);
59}
60
61#define DEBUG_FPU_REGS (DebugFPURegs())
62#define FORCE_AVX_REGS (ForceAVXRegs())
63#else
64#define DEBUG_FPU_REGS (0)
65#define FORCE_AVX_REGS (0)
66#endif
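// Usage note: when LLDB_DEBUGSERVER_RELEASE or LLDB_DEBUGSERVER_DEBUG is
// defined, the two toggles above are driven purely by the presence of
// environment variables; only getenv() returning non-NULL matters, never the
// variable's value. For example, a hypothetical launch along the lines of
//
//     DNB_DEBUG_FPU_REGS=1 DNB_DEBUG_X86_FORCE_AVX_REGS=1 debugserver ...
//
// makes DEBUG_FPU_REGS and FORCE_AVX_REGS evaluate to true for the lifetime
// of the process. In other build configurations both macros are hard-wired
// to 0.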
67
68enum DNBArchImplX86_64::AVXPresence DNBArchImplX86_64::s_has_avx = DNBArchImplX86_64::kAVXUnknown;
69
70uint64_t
71DNBArchImplX86_64::GetPC(uint64_t failValue)
72{
73    // Get program counter
74    if (GetGPRState(false) == KERN_SUCCESS)
75        return m_state.context.gpr.__rip;
76    return failValue;
77}
78
79kern_return_t
80DNBArchImplX86_64::SetPC(uint64_t value)
81{
82    // Set the program counter
83    kern_return_t err = GetGPRState(false);
84    if (err == KERN_SUCCESS)
85    {
86        m_state.context.gpr.__rip = value;
87        err = SetGPRState();
88    }
89    return err == KERN_SUCCESS;
90}
91
92uint64_t
93DNBArchImplX86_64::GetSP(uint64_t failValue)
94{
95    // Get stack pointer
96    if (GetGPRState(false) == KERN_SUCCESS)
97        return m_state.context.gpr.__rsp;
98    return failValue;
99}
100
101// Uncomment the value below to verify the values in the debugger.
102//#define DEBUG_GPR_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
103
104kern_return_t
105DNBArchImplX86_64::GetGPRState(bool force)
106{
107    if (force || m_state.GetError(e_regSetGPR, Read))
108    {
109        kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
110        DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
111
112#if DEBUG_GPR_VALUES
113        m_state.context.gpr.__rax = ('a' << 8) + 'x';
114        m_state.context.gpr.__rbx = ('b' << 8) + 'x';
115        m_state.context.gpr.__rcx = ('c' << 8) + 'x';
116        m_state.context.gpr.__rdx = ('d' << 8) + 'x';
117        m_state.context.gpr.__rdi = ('d' << 8) + 'i';
118        m_state.context.gpr.__rsi = ('s' << 8) + 'i';
119        m_state.context.gpr.__rbp = ('b' << 8) + 'p';
120        m_state.context.gpr.__rsp = ('s' << 8) + 'p';
121        m_state.context.gpr.__r8  = ('r' << 8) + '8';
122        m_state.context.gpr.__r9  = ('r' << 8) + '9';
123        m_state.context.gpr.__r10 = ('r' << 8) + 'a';
124        m_state.context.gpr.__r11 = ('r' << 8) + 'b';
125        m_state.context.gpr.__r12 = ('r' << 8) + 'c';
126        m_state.context.gpr.__r13 = ('r' << 8) + 'd';
127        m_state.context.gpr.__r14 = ('r' << 8) + 'e';
128        m_state.context.gpr.__r15 = ('r' << 8) + 'f';
129        m_state.context.gpr.__rip = ('i' << 8) + 'p';
130        m_state.context.gpr.__rflags = ('f' << 8) + 'l';
131        m_state.context.gpr.__cs = ('c' << 8) + 's';
132        m_state.context.gpr.__fs = ('f' << 8) + 's';
133        m_state.context.gpr.__gs = ('g' << 8) + 's';
134        m_state.SetError(e_regSetGPR, Read, 0);
135#else
136        mach_msg_type_number_t count = e_regSetWordSizeGPR;
137        m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
138        DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
139                          "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
140                          "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
141                          "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
142                          "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
143                          "\n\trip = %16.16llx"
144                          "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
145                          m_thread->ThreadID(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT,
146                          m_state.GetError(e_regSetGPR, Read),
147                          m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
148                          m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
149                          m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
150                          m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
151                          m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
152                          m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
153                          m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);
154
155        //      DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
156        //                        "\n\trax = %16.16llx"
157        //                        "\n\trbx = %16.16llx"
158        //                        "\n\trcx = %16.16llx"
159        //                        "\n\trdx = %16.16llx"
160        //                        "\n\trdi = %16.16llx"
161        //                        "\n\trsi = %16.16llx"
162        //                        "\n\trbp = %16.16llx"
163        //                        "\n\trsp = %16.16llx"
164        //                        "\n\t r8 = %16.16llx"
165        //                        "\n\t r9 = %16.16llx"
166        //                        "\n\tr10 = %16.16llx"
167        //                        "\n\tr11 = %16.16llx"
168        //                        "\n\tr12 = %16.16llx"
169        //                        "\n\tr13 = %16.16llx"
170        //                        "\n\tr14 = %16.16llx"
171        //                        "\n\tr15 = %16.16llx"
172        //                        "\n\trip = %16.16llx"
173        //                        "\n\tflg = %16.16llx"
174        //                        "\n\t cs = %16.16llx"
175        //                        "\n\t fs = %16.16llx"
176        //                        "\n\t gs = %16.16llx",
177        //                        m_thread->ThreadID(),
178        //                        x86_THREAD_STATE64,
179        //                        x86_THREAD_STATE64_COUNT,
180        //                        m_state.GetError(e_regSetGPR, Read),
181        //                        m_state.context.gpr.__rax,
182        //                        m_state.context.gpr.__rbx,
183        //                        m_state.context.gpr.__rcx,
184        //                        m_state.context.gpr.__rdx,
185        //                        m_state.context.gpr.__rdi,
186        //                        m_state.context.gpr.__rsi,
187        //                        m_state.context.gpr.__rbp,
188        //                        m_state.context.gpr.__rsp,
189        //                        m_state.context.gpr.__r8,
190        //                        m_state.context.gpr.__r9,
191        //                        m_state.context.gpr.__r10,
192        //                        m_state.context.gpr.__r11,
193        //                        m_state.context.gpr.__r12,
194        //                        m_state.context.gpr.__r13,
195        //                        m_state.context.gpr.__r14,
196        //                        m_state.context.gpr.__r15,
197        //                        m_state.context.gpr.__rip,
198        //                        m_state.context.gpr.__rflags,
199        //                        m_state.context.gpr.__cs,
200        //                        m_state.context.gpr.__fs,
201        //                        m_state.context.gpr.__gs);
202#endif
203    }
204    return m_state.GetError(e_regSetGPR, Read);
205}
206
207// Uncomment the value below to verify the values in the debugger.
208//#define DEBUG_FPU_REGS 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
209
210kern_return_t
211DNBArchImplX86_64::GetFPUState(bool force)
212{
213    if (force || m_state.GetError(e_regSetFPU, Read))
214    {
215        if (DEBUG_FPU_REGS) {
216            if (CPUHasAVX() || FORCE_AVX_REGS)
217            {
218                m_state.context.fpu.avx.__fpu_reserved[0] = -1;
219                m_state.context.fpu.avx.__fpu_reserved[1] = -1;
220                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
221                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
222                m_state.context.fpu.avx.__fpu_ftw = 1;
223                m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
224                m_state.context.fpu.avx.__fpu_fop = 2;
225                m_state.context.fpu.avx.__fpu_ip = 3;
226                m_state.context.fpu.avx.__fpu_cs = 4;
227                m_state.context.fpu.avx.__fpu_rsrv2 = 5;
228                m_state.context.fpu.avx.__fpu_dp = 6;
229                m_state.context.fpu.avx.__fpu_ds = 7;
230                m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
231                m_state.context.fpu.avx.__fpu_mxcsr = 8;
232                m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
233                int i;
234                for (i=0; i<16; ++i)
235                {
236                    if (i<10)
237                    {
238                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
239                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
240                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
241                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
242                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
243                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
244                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
245                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
246                    }
247                    else
248                    {
249                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
250                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
251                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
252                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
253                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
254                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
255                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
256                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
257                    }
258
259                    m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
260                    m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
261                    m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
262                    m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
263                    m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
264                    m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
265                    m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
266                    m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
267                    m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8';
268                    m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9';
269                    m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A';
270                    m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B';
271                    m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C';
272                    m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D';
273                    m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E';
274                    m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F';
275
276                    m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
277                    m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
278                    m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
279                    m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
280                    m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
281                    m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
282                    m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
283                    m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
284                    m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8';
285                    m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9';
286                    m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A';
287                    m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B';
288                    m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C';
289                    m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D';
290                    m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E';
291                    m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F';
292                }
293                for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
294                    m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
295                m_state.context.fpu.avx.__fpu_reserved1 = -1;
296                for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
297                    m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
298                m_state.SetError(e_regSetFPU, Read, 0);
299            }
300            else
301            {
302                m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
303                m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
304                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
305                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
306                m_state.context.fpu.no_avx.__fpu_ftw = 1;
307                m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
308                m_state.context.fpu.no_avx.__fpu_fop = 2;
309                m_state.context.fpu.no_avx.__fpu_ip = 3;
310                m_state.context.fpu.no_avx.__fpu_cs = 4;
311                m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
312                m_state.context.fpu.no_avx.__fpu_dp = 6;
313                m_state.context.fpu.no_avx.__fpu_ds = 7;
314                m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
315                m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
316                m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
317                int i;
318                for (i=0; i<16; ++i)
319                {
320                    if (i<10)
321                    {
322                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
323                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
324                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
325                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
326                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
327                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
328                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
329                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
330                    }
331                    else
332                    {
333                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
334                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
335                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
336                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
337                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
338                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
339                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
340                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
341                    }
342
343                    m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
344                    m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
345                    m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
346                    m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
347                    m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
348                    m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
349                    m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
350                    m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
351                    m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
352                    m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
353                    m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
354                    m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
355                    m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
356                    m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
357                    m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
358                    m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
359                }
360                for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
361                    m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
362                m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
363                m_state.SetError(e_regSetFPU, Read, 0);
364            }
365        }
366        else
367        {
368            if (CPUHasAVX() || FORCE_AVX_REGS)
369            {
370                mach_msg_type_number_t count = e_regSetWordSizeAVX;
371                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
372            }
373            else
374            {
375                mach_msg_type_number_t count = e_regSetWordSizeFPR;
376                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
377            }
378        }
379    }
380    return m_state.GetError(e_regSetFPU, Read);
381}
382
383kern_return_t
384DNBArchImplX86_64::GetEXCState(bool force)
385{
386    if (force || m_state.GetError(e_regSetEXC, Read))
387    {
388        mach_msg_type_number_t count = e_regSetWordSizeEXC;
389        m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
390    }
391    return m_state.GetError(e_regSetEXC, Read);
392}
393
394kern_return_t
395DNBArchImplX86_64::SetGPRState()
396{
397    kern_return_t kret = ::thread_abort_safely(m_thread->ThreadID());
398    DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->ThreadID(), kret, m_thread->Process()->StopCount());
399
400    m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
401    DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
402                      "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
403                      "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
404                      "\n\t r8 = %16.16llx  r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
405                      "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
406                      "\n\trip = %16.16llx"
407                      "\n\tflg = %16.16llx  cs = %16.16llx  fs = %16.16llx  gs = %16.16llx",
408                      m_thread->ThreadID(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
409                      m_state.GetError(e_regSetGPR, Write),
410                      m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
411                      m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
412                      m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
413                      m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
414                      m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
415                      m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
416                      m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
417    return m_state.GetError(e_regSetGPR, Write);
418}
419
420kern_return_t
421DNBArchImplX86_64::SetFPUState()
422{
423    if (DEBUG_FPU_REGS)
424    {
425        m_state.SetError(e_regSetFPU, Write, 0);
426        return m_state.GetError(e_regSetFPU, Write);
427    }
428    else
429    {
430        if (CPUHasAVX() || FORCE_AVX_REGS)
431        {
432            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
433            return m_state.GetError(e_regSetFPU, Write);
434        }
435        else
436        {
437            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPR));
438            return m_state.GetError(e_regSetFPU, Write);
439        }
440    }
441}
442
443kern_return_t
444DNBArchImplX86_64::SetEXCState()
445{
446    m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
447    return m_state.GetError(e_regSetEXC, Write);
448}
449
450kern_return_t
451DNBArchImplX86_64::GetDBGState(bool force)
452{
453    if (force || m_state.GetError(e_regSetDBG, Read))
454    {
455        mach_msg_type_number_t count = e_regSetWordSizeDBG;
456        m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
457    }
458    return m_state.GetError(e_regSetDBG, Read);
459}
460
461kern_return_t
462DNBArchImplX86_64::SetDBGState()
463{
464    m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
465    return m_state.GetError(e_regSetDBG, Write);
466}
467
468void
469DNBArchImplX86_64::ThreadWillResume()
470{
471    // Do we need to step this thread? If so, let the mach thread tell us so.
472    if (m_thread->IsStepping())
473    {
474        // This is the primary thread, let the arch do anything it needs
475        EnableHardwareSingleStep(true);
476    }
477}
478
479bool
480DNBArchImplX86_64::ThreadDidStop()
481{
482    bool success = true;
483
484    m_state.InvalidateAllRegisterStates();
485
486    // Are we stepping a single instruction?
487    if (GetGPRState(true) == KERN_SUCCESS)
488    {
489        // We are single stepping, was this the primary thread?
490        if (m_thread->IsStepping())
491        {
492            // This was the primary thread, so we need to clear the
493            // trace bit.
494            success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
495        }
496        else
497        {
498            // The MachThread will automatically restore the suspend count
499            // in ThreadDidStop(), so we don't need to do anything here if
500            // we weren't the primary thread the last time
501        }
502    }
503    return success;
504}
505
506bool
507DNBArchImplX86_64::NotifyException(MachException::Data& exc)
508{
509    switch (exc.exc_type)
510    {
511        case EXC_BAD_ACCESS:
512            break;
513        case EXC_BAD_INSTRUCTION:
514            break;
515        case EXC_ARITHMETIC:
516            break;
517        case EXC_EMULATION:
518            break;
519        case EXC_SOFTWARE:
520            break;
521        case EXC_BREAKPOINT:
522            if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
523            {
524                nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
525                if (pc != INVALID_NUB_ADDRESS && pc > 0)
526                {
527                    pc -= 1;
528                    // Check for a breakpoint at one byte prior to the current PC value
529                    // since the PC will be just past the trap.
530
531                    nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc);
532                    if (NUB_BREAK_ID_IS_VALID(breakID))
533                    {
534                        // Backup the PC for i386 since the trap was taken and the PC
535                        // Back up the PC since the trap was taken and the PC
536                        // is at the address following the single-byte trap instruction.
537                        {
538                            m_state.context.gpr.__rip = pc;
539                            // Write the new PC back out
540                            SetGPRState ();
541                        }
542                    }
543                    return true;
544                }
545            }
546            break;
547        case EXC_SYSCALL:
548            break;
549        case EXC_MACH_SYSCALL:
550            break;
551        case EXC_RPC_ALERT:
552            break;
553    }
554    return false;
555}
556
557uint32_t
558DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
559{
560    // Available debug address registers: dr0, dr1, dr2, dr3.
561    return 4;
562}
563
564static uint32_t
565size_and_rw_bits(nub_size_t size, bool read, bool write)
566{
567    uint32_t rw;
568    if (read) {
569        rw = 0x3; // READ or READ/WRITE
570    } else if (write) {
571        rw = 0x1; // WRITE
572    } else {
573        assert(0 && "read and write cannot both be false");
574    }
575
576    switch (size) {
577    case 1:
578        return rw;
579    case 2:
580        return (0x1 << 2) | rw;
581    case 4:
582        return (0x3 << 2) | rw;
583    case 8:
584        return (0x2 << 2) | rw;
585    default:
586        assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
587    }
588}
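// Worked examples of the encoding above (restating what the function computes):
//
//   size_and_rw_bits(1, false, true) == 0x1   // len=0b00 (1 byte),  rw=0b01 (write)
//   size_and_rw_bits(4, true,  true) == 0xF   // len=0b11 (4 bytes), rw=0b11 (read/write)
//   size_and_rw_bits(8, false, true) == 0x9   // len=0b10 (8 bytes), rw=0b01 (write)
//
// SetWatchpoint() below shifts this 4-bit value into the appropriate rw/len
// field of DR7.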
589void
590DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
591{
592    // Set both dr7 (debug control register) and dri (debug address register).
593
594    // dr7{7-0} encodes the local/global enable bits:
595    //  global enable --. .-- local enable
596    //                  | |
597    //                  v v
598    //      dr0 -> bits{1-0}
599    //      dr1 -> bits{3-2}
600    //      dr2 -> bits{5-4}
601    //      dr3 -> bits{7-6}
602    //
603    // dr7{31-16} encodes the rw/len bits:
604    //  b_x+3, b_x+2, b_x+1, b_x
605    //      where bits{x+1, x} => rw
606    //            0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
607    //      and bits{x+3, x+2} => len
608    //            0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
609    //
610    //      dr0 -> bits{19-16}
611    //      dr1 -> bits{23-20}
612    //      dr2 -> bits{27-24}
613    //      dr3 -> bits{31-28}
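    //
    // Concretely, enabling a 4-byte read/write watchpoint in slot 1
    // (hw_index == 1) ORs in:
    //      1 << 2    -> bit 2, the local enable bit for dr1
    //      0xF << 20 -> rw/len nibble 0b1111 into bits{23-20}
    // while the watched address itself goes into dr1 in the switch below.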
614    debug_state.__dr7 |= (1 << (2*hw_index) |
615                          size_and_rw_bits(size, read, write) << (16+4*hw_index));
616    switch (hw_index) {
617    case 0:
618        debug_state.__dr0 = addr; break;
619    case 1:
620        debug_state.__dr1 = addr; break;
621    case 2:
622        debug_state.__dr2 = addr; break;
623    case 3:
624        debug_state.__dr3 = addr; break;
625    default:
626        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
627    }
628    return;
629}
630
631void
632DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
633{
634    debug_state.__dr7 &= ~(3 << (2*hw_index));
635    switch (hw_index) {
636    case 0:
637        debug_state.__dr0 = 0; break;
638    case 1:
639        debug_state.__dr1 = 0; break;
640    case 2:
641        debug_state.__dr2 = 0; break;
642    case 3:
643        debug_state.__dr3 = 0; break;
644    default:
645        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
646    }
647    return;
648}
649
650bool
651DNBArchImplX86_64::IsVacantWatchpoint(const DBG &debug_state, uint32_t hw_index)
652{
653    // Check dr7 (debug control register) for local/global enable bits:
654    //  global enable --. .-- local enable
655    //                  | |
656    //                  v v
657    //      dr0 -> bits{1-0}
658    //      dr1 -> bits{3-2}
659    //      dr2 -> bits{5-4}
660    //      dr3 -> bits{7-6}
661    return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
662}
663
664uint32_t
665DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write)
666{
667    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = %8.8p, size = %u, read = %u, write = %u)", addr, size, read, write);
668
669    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
670
671    // Can only watch 1, 2, 4, or 8 bytes.
672    if (!(size == 1 || size == 2 || size == 4 || size == 8))
673        return INVALID_NUB_HW_INDEX;
674
675    // We must watch for either read or write
676    if (read == false && write == false)
677        return INVALID_NUB_HW_INDEX;
678
679    // Read the debug state
680    kern_return_t kret = GetDBGState(false);
681
682    if (kret == KERN_SUCCESS)
683    {
684        // Check to make sure we have the needed hardware support
685        uint32_t i = 0;
686
687        DBG &debug_state = m_state.context.dbg;
688        for (i = 0; i < num_hw_watchpoints; ++i)
689        {
690            if (IsVacantWatchpoint(debug_state, i))
691                break;
692        }
693
694        // See if we found an available hw breakpoint slot above
695        if (i < num_hw_watchpoints)
696        {
697            // Modify the debug state (a reference to m_state.context.dbg), first.
698            SetWatchpoint(debug_state, i, addr, size, read, write);
699            // Now set the watch point in the inferior.
700            kret = SetDBGState();
701            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
702
703            if (kret == KERN_SUCCESS)
704                return i;
705        }
706        else
707        {
708            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
709        }
710    }
711    return INVALID_NUB_HW_INDEX;
712}
713
714bool
715DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index)
716{
717    kern_return_t kret = GetDBGState(false);
718
719    const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
720    if (kret == KERN_SUCCESS)
721    {
722        DBG &debug_state = m_state.context.dbg;
723        if (hw_index < num_hw_points && !IsVacantWatchpoint(debug_state, hw_index))
724        {
725            // Modify the debug state (a reference to m_state.context.dbg), first.
726            ClearWatchpoint(debug_state, hw_index);
727            // Now disable the watch point in the inferior.
728            kret = SetDBGState();
729            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
730                             hw_index);
731
732            if (kret == KERN_SUCCESS)
733                return true;
734        }
735    }
736    return false;
737}
738
739// Set the single step bit in the processor status register.
740kern_return_t
741DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
742{
743    if (GetGPRState(false) == KERN_SUCCESS)
744    {
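        // The trace bit is the trap flag (TF), bit 8 of RFLAGS; while it is
        // set, the CPU raises a single-step debug exception after each
        // instruction.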
745        const uint32_t trace_bit = 0x100u;
746        if (enable)
747            m_state.context.gpr.__rflags |= trace_bit;
748        else
749            m_state.context.gpr.__rflags &= ~trace_bit;
750        return SetGPRState();
751    }
752    return m_state.GetError(e_regSetGPR, Read);
753}
754
755
756//----------------------------------------------------------------------
757// Register information definitions
758//----------------------------------------------------------------------
759
760enum
761{
762    gpr_rax = 0,
763    gpr_rbx,
764    gpr_rcx,
765    gpr_rdx,
766    gpr_rdi,
767    gpr_rsi,
768    gpr_rbp,
769    gpr_rsp,
770    gpr_r8,
771    gpr_r9,
772    gpr_r10,
773    gpr_r11,
774    gpr_r12,
775    gpr_r13,
776    gpr_r14,
777    gpr_r15,
778    gpr_rip,
779    gpr_rflags,
780    gpr_cs,
781    gpr_fs,
782    gpr_gs,
783    k_num_gpr_regs
784};
785
786enum {
787    fpu_fcw,
788    fpu_fsw,
789    fpu_ftw,
790    fpu_fop,
791    fpu_ip,
792    fpu_cs,
793    fpu_dp,
794    fpu_ds,
795    fpu_mxcsr,
796    fpu_mxcsrmask,
797    fpu_stmm0,
798    fpu_stmm1,
799    fpu_stmm2,
800    fpu_stmm3,
801    fpu_stmm4,
802    fpu_stmm5,
803    fpu_stmm6,
804    fpu_stmm7,
805    fpu_xmm0,
806    fpu_xmm1,
807    fpu_xmm2,
808    fpu_xmm3,
809    fpu_xmm4,
810    fpu_xmm5,
811    fpu_xmm6,
812    fpu_xmm7,
813    fpu_xmm8,
814    fpu_xmm9,
815    fpu_xmm10,
816    fpu_xmm11,
817    fpu_xmm12,
818    fpu_xmm13,
819    fpu_xmm14,
820    fpu_xmm15,
821    fpu_ymm0,
822    fpu_ymm1,
823    fpu_ymm2,
824    fpu_ymm3,
825    fpu_ymm4,
826    fpu_ymm5,
827    fpu_ymm6,
828    fpu_ymm7,
829    fpu_ymm8,
830    fpu_ymm9,
831    fpu_ymm10,
832    fpu_ymm11,
833    fpu_ymm12,
834    fpu_ymm13,
835    fpu_ymm14,
836    fpu_ymm15,
837    k_num_fpu_regs,
838
839    // Aliases
840    fpu_fctrl = fpu_fcw,
841    fpu_fstat = fpu_fsw,
842    fpu_ftag  = fpu_ftw,
843    fpu_fiseg = fpu_cs,
844    fpu_fioff = fpu_ip,
845    fpu_foseg = fpu_ds,
846    fpu_fooff = fpu_dp
847};
848
849enum {
850    exc_trapno,
851    exc_err,
852    exc_faultvaddr,
853    k_num_exc_regs,
854};
855
856
857enum gcc_dwarf_regnums
858{
859    gcc_dwarf_rax = 0,
860    gcc_dwarf_rdx = 1,
861    gcc_dwarf_rcx = 2,
862    gcc_dwarf_rbx = 3,
863    gcc_dwarf_rsi = 4,
864    gcc_dwarf_rdi = 5,
865    gcc_dwarf_rbp = 6,
866    gcc_dwarf_rsp = 7,
867    gcc_dwarf_r8,
868    gcc_dwarf_r9,
869    gcc_dwarf_r10,
870    gcc_dwarf_r11,
871    gcc_dwarf_r12,
872    gcc_dwarf_r13,
873    gcc_dwarf_r14,
874    gcc_dwarf_r15,
875    gcc_dwarf_rip,
876    gcc_dwarf_xmm0,
877    gcc_dwarf_xmm1,
878    gcc_dwarf_xmm2,
879    gcc_dwarf_xmm3,
880    gcc_dwarf_xmm4,
881    gcc_dwarf_xmm5,
882    gcc_dwarf_xmm6,
883    gcc_dwarf_xmm7,
884    gcc_dwarf_xmm8,
885    gcc_dwarf_xmm9,
886    gcc_dwarf_xmm10,
887    gcc_dwarf_xmm11,
888    gcc_dwarf_xmm12,
889    gcc_dwarf_xmm13,
890    gcc_dwarf_xmm14,
891    gcc_dwarf_xmm15,
892    gcc_dwarf_stmm0,
893    gcc_dwarf_stmm1,
894    gcc_dwarf_stmm2,
895    gcc_dwarf_stmm3,
896    gcc_dwarf_stmm4,
897    gcc_dwarf_stmm5,
898    gcc_dwarf_stmm6,
899    gcc_dwarf_stmm7,
900    gcc_dwarf_ymm0 = gcc_dwarf_xmm0,
901    gcc_dwarf_ymm1 = gcc_dwarf_xmm1,
902    gcc_dwarf_ymm2 = gcc_dwarf_xmm2,
903    gcc_dwarf_ymm3 = gcc_dwarf_xmm3,
904    gcc_dwarf_ymm4 = gcc_dwarf_xmm4,
905    gcc_dwarf_ymm5 = gcc_dwarf_xmm5,
906    gcc_dwarf_ymm6 = gcc_dwarf_xmm6,
907    gcc_dwarf_ymm7 = gcc_dwarf_xmm7,
908    gcc_dwarf_ymm8 = gcc_dwarf_xmm8,
909    gcc_dwarf_ymm9 = gcc_dwarf_xmm9,
910    gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
911    gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
912    gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
913    gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
914    gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
915    gcc_dwarf_ymm15 = gcc_dwarf_xmm15
916};
917
918enum gdb_regnums
919{
920    gdb_rax     =   0,
921    gdb_rbx     =   1,
922    gdb_rcx     =   2,
923    gdb_rdx     =   3,
924    gdb_rsi     =   4,
925    gdb_rdi     =   5,
926    gdb_rbp     =   6,
927    gdb_rsp     =   7,
928    gdb_r8      =   8,
929    gdb_r9      =   9,
930    gdb_r10     =  10,
931    gdb_r11     =  11,
932    gdb_r12     =  12,
933    gdb_r13     =  13,
934    gdb_r14     =  14,
935    gdb_r15     =  15,
936    gdb_rip     =  16,
937    gdb_rflags  =  17,
938    gdb_cs      =  18,
939    gdb_ss      =  19,
940    gdb_ds      =  20,
941    gdb_es      =  21,
942    gdb_fs      =  22,
943    gdb_gs      =  23,
944    gdb_stmm0   =  24,
945    gdb_stmm1   =  25,
946    gdb_stmm2   =  26,
947    gdb_stmm3   =  27,
948    gdb_stmm4   =  28,
949    gdb_stmm5   =  29,
950    gdb_stmm6   =  30,
951    gdb_stmm7   =  31,
952    gdb_fctrl   =  32,  gdb_fcw = gdb_fctrl,
953    gdb_fstat   =  33,  gdb_fsw = gdb_fstat,
954    gdb_ftag    =  34,  gdb_ftw = gdb_ftag,
955    gdb_fiseg   =  35,  gdb_fpu_cs  = gdb_fiseg,
956    gdb_fioff   =  36,  gdb_ip  = gdb_fioff,
957    gdb_foseg   =  37,  gdb_fpu_ds  = gdb_foseg,
958    gdb_fooff   =  38,  gdb_dp  = gdb_fooff,
959    gdb_fop     =  39,
960    gdb_xmm0    =  40,
961    gdb_xmm1    =  41,
962    gdb_xmm2    =  42,
963    gdb_xmm3    =  43,
964    gdb_xmm4    =  44,
965    gdb_xmm5    =  45,
966    gdb_xmm6    =  46,
967    gdb_xmm7    =  47,
968    gdb_xmm8    =  48,
969    gdb_xmm9    =  49,
970    gdb_xmm10   =  50,
971    gdb_xmm11   =  51,
972    gdb_xmm12   =  52,
973    gdb_xmm13   =  53,
974    gdb_xmm14   =  54,
975    gdb_xmm15   =  55,
976    gdb_mxcsr   =  56,
977    gdb_ymm0    =  gdb_xmm0,
978    gdb_ymm1    =  gdb_xmm1,
979    gdb_ymm2    =  gdb_xmm2,
980    gdb_ymm3    =  gdb_xmm3,
981    gdb_ymm4    =  gdb_xmm4,
982    gdb_ymm5    =  gdb_xmm5,
983    gdb_ymm6    =  gdb_xmm6,
984    gdb_ymm7    =  gdb_xmm7,
985    gdb_ymm8    =  gdb_xmm8,
986    gdb_ymm9    =  gdb_xmm9,
987    gdb_ymm10   =  gdb_xmm10,
988    gdb_ymm11   =  gdb_xmm11,
989    gdb_ymm12   =  gdb_xmm12,
990    gdb_ymm13   =  gdb_xmm13,
991    gdb_ymm14   =  gdb_xmm14,
992    gdb_ymm15   =  gdb_xmm15
993};
994
995#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
996#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
997#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
998#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg)     + offsetof (DNBArchImplX86_64::Context, exc))
999
1000// This does not accurately identify the location of ymm0...7 in
1001// Context.fpu.avx, because Context.fpu.avx contains padding that we
1002// do not need.  These offset macros describe the register layout that
1003// debugserver transmits to the debugger, not the layout returned by
1004// thread_get_state().
1005#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))
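// For instance, AVX_OFFSET_YMM(0) evaluates to AVX_OFFSET(xmm7) plus the 16
// bytes of xmm7 itself, with each higher index adding another 32 bytes; these
// are offsets into the register data that debugserver sends to the debugger,
// not offsets into the kernel's AVX thread state structure.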
1006
1007#define GPR_SIZE(reg)       (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
1008#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
1009#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
1010#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
1011#define FPU_SIZE_YMM(reg)   (32)
1012#define EXC_SIZE(reg)       (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1013
1014// These macros will auto define the register name, alt name, register size,
1015// register offset, encoding, format and native register. This ensures that
1016// the register state structures are defined correctly and have the correct
1017// sizes and offsets.
1018#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg }
1019#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg }
1020#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg }
1021#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg }
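// As an illustration, DEFINE_GPR(rax) expands (roughly) to:
//
//   { e_regSetGPR, gpr_rax, "rax", NULL, Uint, Hex,
//     GPR_SIZE(rax), GPR_OFFSET(rax),
//     gcc_dwarf_rax, gcc_dwarf_rax, INVALID_NUB_REGNUM, gdb_rax }
//
// so each table entry below derives its name, size, offset, and DWARF/gdb
// register numbers from a single macro argument.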
1022
1023// General purpose registers for 64 bit
1024const DNBRegisterInfo
1025DNBArchImplX86_64::g_gpr_registers[] =
1026{
1027    DEFINE_GPR      (rax),
1028    DEFINE_GPR      (rbx),
1029    DEFINE_GPR_ALT  (rcx , "arg4", GENERIC_REGNUM_ARG4),
1030    DEFINE_GPR_ALT  (rdx , "arg3", GENERIC_REGNUM_ARG3),
1031    DEFINE_GPR_ALT  (rdi , "arg1", GENERIC_REGNUM_ARG1),
1032    DEFINE_GPR_ALT  (rsi , "arg2", GENERIC_REGNUM_ARG2),
1033    DEFINE_GPR_ALT  (rbp , "fp"  , GENERIC_REGNUM_FP),
1034    DEFINE_GPR_ALT  (rsp , "sp"  , GENERIC_REGNUM_SP),
1035    DEFINE_GPR_ALT  (r8  , "arg5", GENERIC_REGNUM_ARG5),
1036    DEFINE_GPR_ALT  (r9  , "arg6", GENERIC_REGNUM_ARG6),
1037    DEFINE_GPR      (r10),
1038    DEFINE_GPR      (r11),
1039    DEFINE_GPR      (r12),
1040    DEFINE_GPR      (r13),
1041    DEFINE_GPR      (r14),
1042    DEFINE_GPR      (r15),
1043    DEFINE_GPR_ALT  (rip , "pc", GENERIC_REGNUM_PC),
1044    DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
1045    DEFINE_GPR_ALT2 (cs,        NULL),
1046    DEFINE_GPR_ALT2 (fs,        NULL),
1047    DEFINE_GPR_ALT2 (gs,        NULL),
1048};
1049
1050// Floating point registers 64 bit
1051const DNBRegisterInfo
1052DNBArchImplX86_64::g_fpu_registers_no_avx[] =
1053{
1054    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1, -1, -1, -1 },
1055    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1, -1, -1, -1 },
1056    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1, -1, -1, -1 },
1057    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1, -1, -1, -1 },
1058    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1, -1, -1, -1 },
1059    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1, -1, -1, -1 },
1060    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1, -1, -1, -1 },
1061    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1, -1, -1, -1 },
1062    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1063    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1064
1065    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
1066    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
1067    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
1068    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
1069    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
1070    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
1071    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
1072    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },
1073
1074    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
1075    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
1076    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
1077    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
1078    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
1079    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
1080    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
1081    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
1082    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8  },
1083    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9  },
1084    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
1085    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
1086    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
1087    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
1088    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
1089    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
1090};
1091
1092const DNBRegisterInfo
1093DNBArchImplX86_64::g_fpu_registers_avx[] =
1094{
1095    { e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1, -1, -1, -1 },
1096    { e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1, -1, -1, -1 },
1097    { e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1, -1, -1, -1 },
1098    { e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1, -1, -1, -1 },
1099    { e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1, -1, -1, -1 },
1100    { e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1, -1, -1, -1 },
1101    { e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1, -1, -1, -1 },
1102    { e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1, -1, -1, -1 },
1103    { e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1104    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1105
1106    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1, gdb_stmm0 },
1107    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1, gdb_stmm1 },
1108    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1, gdb_stmm2 },
1109    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1, gdb_stmm3 },
1110    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1, gdb_stmm4 },
1111    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1, gdb_stmm5 },
1112    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1, gdb_stmm6 },
1113    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1, gdb_stmm7 },
1114
1115    { e_regSetFPU, fpu_xmm0 , "xmm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0)   , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1, gdb_xmm0 },
1116    { e_regSetFPU, fpu_xmm1 , "xmm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1)   , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1, gdb_xmm1 },
1117    { e_regSetFPU, fpu_xmm2 , "xmm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2)   , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1, gdb_xmm2 },
1118    { e_regSetFPU, fpu_xmm3 , "xmm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3)   , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1, gdb_xmm3 },
1119    { e_regSetFPU, fpu_xmm4 , "xmm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4)   , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1, gdb_xmm4 },
1120    { e_regSetFPU, fpu_xmm5 , "xmm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5)   , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1, gdb_xmm5 },
1121    { e_regSetFPU, fpu_xmm6 , "xmm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6)   , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1, gdb_xmm6 },
1122    { e_regSetFPU, fpu_xmm7 , "xmm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7)   , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1, gdb_xmm7 },
1123    { e_regSetFPU, fpu_xmm8 , "xmm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8)   , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1, gdb_xmm8  },
1124    { e_regSetFPU, fpu_xmm9 , "xmm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9)   , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1, gdb_xmm9  },
1125    { e_regSetFPU, fpu_xmm10, "xmm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10)  , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1, gdb_xmm10 },
1126    { e_regSetFPU, fpu_xmm11, "xmm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11)  , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1, gdb_xmm11 },
1127    { e_regSetFPU, fpu_xmm12, "xmm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12)  , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1, gdb_xmm12 },
1128    { e_regSetFPU, fpu_xmm13, "xmm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13)  , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1, gdb_xmm13 },
1129    { e_regSetFPU, fpu_xmm14, "xmm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14)  , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1, gdb_xmm14 },
1130    { e_regSetFPU, fpu_xmm15, "xmm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15)  , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1, gdb_xmm15 },
1131
1132    { e_regSetFPU, fpu_ymm0 , "ymm0"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0)   , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1, gdb_ymm0 },
1133    { e_regSetFPU, fpu_ymm1 , "ymm1"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1)   , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1, gdb_ymm1 },
1134    { e_regSetFPU, fpu_ymm2 , "ymm2"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2)   , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1, gdb_ymm2 },
1135    { e_regSetFPU, fpu_ymm3 , "ymm3"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3)   , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1, gdb_ymm3 },
1136    { e_regSetFPU, fpu_ymm4 , "ymm4"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4)   , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1, gdb_ymm4 },
1137    { e_regSetFPU, fpu_ymm5 , "ymm5"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5)   , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1, gdb_ymm5 },
1138    { e_regSetFPU, fpu_ymm6 , "ymm6"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6)   , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1, gdb_ymm6 },
1139    { e_regSetFPU, fpu_ymm7 , "ymm7"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7)   , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1, gdb_ymm7 },
1140    { e_regSetFPU, fpu_ymm8 , "ymm8"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8)   , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1, gdb_ymm8  },
1141    { e_regSetFPU, fpu_ymm9 , "ymm9"    , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9)   , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1, gdb_ymm9  },
1142    { e_regSetFPU, fpu_ymm10, "ymm10"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10)  , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1, gdb_ymm10 },
1143    { e_regSetFPU, fpu_ymm11, "ymm11"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11)  , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1, gdb_ymm11 },
1144    { e_regSetFPU, fpu_ymm12, "ymm12"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12)  , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1, gdb_ymm12 },
1145    { e_regSetFPU, fpu_ymm13, "ymm13"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13)  , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1, gdb_ymm13 },
1146    { e_regSetFPU, fpu_ymm14, "ymm14"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14)  , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1, gdb_ymm14 },
1147    { e_regSetFPU, fpu_ymm15, "ymm15"   , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15)  , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1, gdb_ymm15 }
1148};
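// Note: there is no single 256-bit field in the AVX thread state; each ymm
// value is assembled from the 128-bit __fpu_xmm<n> (low half) and
// __fpu_ymmh<n> (high half) fields -- see GetRegisterValue() and
// SetRegisterValue() below.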
1149
1150// Exception registers
1151
1152const DNBRegisterInfo
1153DNBArchImplX86_64::g_exc_registers[] =
1154{
1155    { e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)       , -1, -1, -1, -1 },
1156    { e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)          , -1, -1, -1, -1 },
1157    { e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr)   , -1, -1, -1, -1 }
1158};
1159
1160// Number of registers in each register set
1161const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
1162const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
1163const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
1164const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
1165const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
1166const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
1167
1168//----------------------------------------------------------------------
1169// Register set definitions. The first definition, at register set index
1170// zero, covers all registers and is followed by the individual register
1171// sets. The register information for the "all registers" set need not be filled in.
1172//----------------------------------------------------------------------
1173const DNBRegisterSetInfo
1174DNBArchImplX86_64::g_reg_sets_no_avx[] =
1175{
1176    { "x86_64 Registers",           NULL,               k_num_all_registers_no_avx },
1177    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1178    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
1179    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1180};
1181
1182const DNBRegisterSetInfo
1183DNBArchImplX86_64::g_reg_sets_avx[] =
1184{
1185    { "x86_64 Registers",           NULL,               k_num_all_registers_avx },
1186    { "General Purpose Registers",  g_gpr_registers,    k_num_gpr_registers },
1187    { "Floating Point Registers",   g_fpu_registers_avx, k_num_fpu_registers_avx },
1188    { "Exception State Registers",  g_exc_registers,    k_num_exc_registers }
1189};
1190
1191// Total number of register sets for this architecture
1192const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);
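// Both register set tables contain the same number of entries (all, GPR,
// FPU, EXC), so sizing off g_reg_sets_avx here is equivalent to sizing off
// g_reg_sets_no_avx.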
1193
1194
1195DNBArchProtocol *
1196DNBArchImplX86_64::Create (MachThread *thread)
1197{
1198    return new DNBArchImplX86_64 (thread);
1199}
1200
1201const uint8_t * const
1202DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
1203{
1204    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
1205    if (byte_size == 1)
1206        return g_breakpoint_opcode;
1207    return NULL;
1208}
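// 0xCC is the one-byte INT3 instruction, the standard software breakpoint
// opcode on both i386 and x86_64, so only a byte size of 1 is supported.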
1209
1210const DNBRegisterSetInfo *
1211DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
1212{
1213    *num_reg_sets = k_num_register_sets;
1214
1215    if (CPUHasAVX() || FORCE_AVX_REGS)
1216        return g_reg_sets_avx;
1217    else
1218        return g_reg_sets_no_avx;
1219}
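// Illustrative sketch of how the table above can be consumed (the real
// callers live in the generic DNBArch/MachThread code; the field names are
// assumed from the initializers above):
//
//   nub_size_t num_sets = 0;
//   const DNBRegisterSetInfo *sets = DNBArchImplX86_64::GetRegisterSetInfo (&num_sets);
//   for (nub_size_t i = 1; i < num_sets; ++i)   // index 0 is the "all registers" set
//       printf ("%s: %lu registers\n", sets[i].name, (unsigned long)sets[i].num_registers);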
1220
1221void
1222DNBArchImplX86_64::Initialize()
1223{
1224    DNBArchPluginInfo arch_plugin_info =
1225    {
1226        CPU_TYPE_X86_64,
1227        DNBArchImplX86_64::Create,
1228        DNBArchImplX86_64::GetRegisterSetInfo,
1229        DNBArchImplX86_64::SoftwareBreakpointOpcode
1230    };
1231
1232    // Register this arch plug-in with the main protocol class
1233    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
1234}
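// Initialize() is expected to be invoked once, early in debugserver startup,
// so that the generic DNBArchProtocol layer can route CPU_TYPE_X86_64 threads
// to this implementation through the callbacks registered above.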
1235
1236bool
1237DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
1238{
1239    if (set == REGISTER_SET_GENERIC)
1240    {
1241        switch (reg)
1242        {
1243            case GENERIC_REGNUM_PC:     // Program Counter
1244                set = e_regSetGPR;
1245                reg = gpr_rip;
1246                break;
1247
1248            case GENERIC_REGNUM_SP:     // Stack Pointer
1249                set = e_regSetGPR;
1250                reg = gpr_rsp;
1251                break;
1252
1253            case GENERIC_REGNUM_FP:     // Frame Pointer
1254                set = e_regSetGPR;
1255                reg = gpr_rbp;
1256                break;
1257
1258            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1259                set = e_regSetGPR;
1260                reg = gpr_rflags;
1261                break;
1262
1263            case GENERIC_REGNUM_RA:     // Return Address
1264            default:
1265                return false;
1266        }
1267    }
1268
1269    if (GetRegisterState(set, false) != KERN_SUCCESS)
1270        return false;
1271
1272    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1273    if (regInfo)
1274    {
1275        value->info = *regInfo;
1276        switch (set)
1277        {
1278            case e_regSetGPR:
1279                if (reg < k_num_gpr_registers)
1280                {
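                    // Every field of the GPR thread state (__rax ... __gs) is
                    // a 64-bit value, so the structure can be indexed as a
                    // flat array of uint64_t using the register number.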
1281                    value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
1282                    return true;
1283                }
1284                break;
1285
1286            case e_regSetFPU:
1287                if (CPUHasAVX() || FORCE_AVX_REGS)
1288                {
1289                    switch (reg)
1290                    {
1291                    case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
1292                    case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
1293                    case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
1294                    case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
1295                    case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
1296                    case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
1297                    case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
1298                    case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
1299                    case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
1300                    case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;
1301
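                    // The x87/MMX data registers are contiguous starting at
                    // __fpu_stmm0; each slot is 16 bytes, of which only the
                    // first 10 bytes hold the 80-bit register value.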
1302                    case fpu_stmm0:
1303                    case fpu_stmm1:
1304                    case fpu_stmm2:
1305                    case fpu_stmm3:
1306                    case fpu_stmm4:
1307                    case fpu_stmm5:
1308                    case fpu_stmm6:
1309                    case fpu_stmm7:
1310                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1311                        return true;
1312
1313                    case fpu_xmm0:
1314                    case fpu_xmm1:
1315                    case fpu_xmm2:
1316                    case fpu_xmm3:
1317                    case fpu_xmm4:
1318                    case fpu_xmm5:
1319                    case fpu_xmm6:
1320                    case fpu_xmm7:
1321                    case fpu_xmm8:
1322                    case fpu_xmm9:
1323                    case fpu_xmm10:
1324                    case fpu_xmm11:
1325                    case fpu_xmm12:
1326                    case fpu_xmm13:
1327                    case fpu_xmm14:
1328                    case fpu_xmm15:
1329                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1330                        return true;
1331
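                    // A 256-bit ymm read is assembled from two 128-bit
                    // halves: the low half from __fpu_xmm<n>, the high half
                    // from __fpu_ymmh<n>.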
1332                    case fpu_ymm0:
1333                    case fpu_ymm1:
1334                    case fpu_ymm2:
1335                    case fpu_ymm3:
1336                    case fpu_ymm4:
1337                    case fpu_ymm5:
1338                    case fpu_ymm6:
1339                    case fpu_ymm7:
1340                    case fpu_ymm8:
1341                    case fpu_ymm9:
1342                    case fpu_ymm10:
1343                    case fpu_ymm11:
1344                    case fpu_ymm12:
1345                    case fpu_ymm13:
1346                    case fpu_ymm14:
1347                    case fpu_ymm15:
1348                        memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
1349                        memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
1350                        return true;
1351                    }
1352                }
1353                else
1354                {
1355                    switch (reg)
1356                    {
1357                        case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
1358                        case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
1359                        case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
1360                        case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
1361                        case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
1362                        case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
1363                        case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
1364                        case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
1365                        case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
1366                        case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;
1367
1368                        case fpu_stmm0:
1369                        case fpu_stmm1:
1370                        case fpu_stmm2:
1371                        case fpu_stmm3:
1372                        case fpu_stmm4:
1373                        case fpu_stmm5:
1374                        case fpu_stmm6:
1375                        case fpu_stmm7:
1376                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
1377                            return true;
1378
1379                        case fpu_xmm0:
1380                        case fpu_xmm1:
1381                        case fpu_xmm2:
1382                        case fpu_xmm3:
1383                        case fpu_xmm4:
1384                        case fpu_xmm5:
1385                        case fpu_xmm6:
1386                        case fpu_xmm7:
1387                        case fpu_xmm8:
1388                        case fpu_xmm9:
1389                        case fpu_xmm10:
1390                        case fpu_xmm11:
1391                        case fpu_xmm12:
1392                        case fpu_xmm13:
1393                        case fpu_xmm14:
1394                        case fpu_xmm15:
1395                            memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
1396                            return true;
1397                    }
1398                }
1399                break;
1400
1401            case e_regSetEXC:
1402                switch (reg)
1403                {
1404                case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno; return true;
1405                case exc_err:       value->value.uint32 = m_state.context.exc.__err; return true;
1406                case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
1407                }
1408                break;
1409        }
1410    }
1411    return false;
1412}
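// Illustrative sketch (not a call site in this file): reading the PC through
// this interface would look roughly like
//
//   DNBRegisterValue reg_value;
//   uint64_t pc = 0;
//   if (arch->GetRegisterValue (e_regSetGPR, gpr_rip, &reg_value))
//       pc = reg_value.value.uint64;
//
// where "arch" is the DNBArchImplX86_64 instance owned by a MachThread.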
1413
1414
1415bool
1416DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
1417{
1418    if (set == REGISTER_SET_GENERIC)
1419    {
1420        switch (reg)
1421        {
1422            case GENERIC_REGNUM_PC:     // Program Counter
1423                set = e_regSetGPR;
1424                reg = gpr_rip;
1425                break;
1426
1427            case GENERIC_REGNUM_SP:     // Stack Pointer
1428                set = e_regSetGPR;
1429                reg = gpr_rsp;
1430                break;
1431
1432            case GENERIC_REGNUM_FP:     // Frame Pointer
1433                set = e_regSetGPR;
1434                reg = gpr_rbp;
1435                break;
1436
1437            case GENERIC_REGNUM_FLAGS:  // Processor flags register
1438                set = e_regSetGPR;
1439                reg = gpr_rflags;
1440                break;
1441
1442            case GENERIC_REGNUM_RA:     // Return Address
1443            default:
1444                return false;
1445        }
1446    }
1447
1448    if (GetRegisterState(set, false) != KERN_SUCCESS)
1449        return false;
1450
1451    bool success = false;
1452    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1453    if (regInfo)
1454    {
1455        switch (set)
1456        {
1457            case e_regSetGPR:
1458                if (reg < k_num_gpr_registers)
1459                {
1460                    ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
1461                    success = true;
1462                }
1463                break;
1464
1465            case e_regSetFPU:
1466                if (CPUHasAVX() || FORCE_AVX_REGS)
1467                {
1468                    switch (reg)
1469                    {
1470                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1471                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1472                    case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1473                    case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
1474                    case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
1475                    case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
1476                    case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
1477                    case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
1478                    case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1479                    case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1480
1481                    case fpu_stmm0:
1482                    case fpu_stmm1:
1483                    case fpu_stmm2:
1484                    case fpu_stmm3:
1485                    case fpu_stmm4:
1486                    case fpu_stmm5:
1487                    case fpu_stmm6:
1488                    case fpu_stmm7:
1489                        memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1490                        success = true;
1491                        break;
1492
1493                    case fpu_xmm0:
1494                    case fpu_xmm1:
1495                    case fpu_xmm2:
1496                    case fpu_xmm3:
1497                    case fpu_xmm4:
1498                    case fpu_xmm5:
1499                    case fpu_xmm6:
1500                    case fpu_xmm7:
1501                    case fpu_xmm8:
1502                    case fpu_xmm9:
1503                    case fpu_xmm10:
1504                    case fpu_xmm11:
1505                    case fpu_xmm12:
1506                    case fpu_xmm13:
1507                    case fpu_xmm14:
1508                    case fpu_xmm15:
1509                        memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1510                        success = true;
1511                        break;
1512
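                    // Split the incoming 32-byte ymm value back into its two
                    // 128-bit halves: the low 16 bytes go to __fpu_xmm<n>, the
                    // high 16 bytes to __fpu_ymmh<n>.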
1513                    case fpu_ymm0:
1514                    case fpu_ymm1:
1515                    case fpu_ymm2:
1516                    case fpu_ymm3:
1517                    case fpu_ymm4:
1518                    case fpu_ymm5:
1519                    case fpu_ymm6:
1520                    case fpu_ymm7:
1521                    case fpu_ymm8:
1522                    case fpu_ymm9:
1523                    case fpu_ymm10:
1524                    case fpu_ymm11:
1525                    case fpu_ymm12:
1526                    case fpu_ymm13:
1527                    case fpu_ymm14:
1528                    case fpu_ymm15:
1529                        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
1530                        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
1531                        success = true; break;  // commit through SetRegisterState() below, as with every other register
1532                    }
1533                }
1534                else
1535                {
1536                    switch (reg)
1537                    {
1538                    case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1539                    case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1540                    case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1541                    case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
1542                    case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
1543                    case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
1544                    case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
1545                    case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
1546                    case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1547                    case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1548
1549                    case fpu_stmm0:
1550                    case fpu_stmm1:
1551                    case fpu_stmm2:
1552                    case fpu_stmm3:
1553                    case fpu_stmm4:
1554                    case fpu_stmm5:
1555                    case fpu_stmm6:
1556                    case fpu_stmm7:
1557                        memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
1558                        success = true;
1559                        break;
1560
1561                    case fpu_xmm0:
1562                    case fpu_xmm1:
1563                    case fpu_xmm2:
1564                    case fpu_xmm3:
1565                    case fpu_xmm4:
1566                    case fpu_xmm5:
1567                    case fpu_xmm6:
1568                    case fpu_xmm7:
1569                    case fpu_xmm8:
1570                    case fpu_xmm9:
1571                    case fpu_xmm10:
1572                    case fpu_xmm11:
1573                    case fpu_xmm12:
1574                    case fpu_xmm13:
1575                    case fpu_xmm14:
1576                    case fpu_xmm15:
1577                        memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
1578                        success = true;
1579                        break;
1580                    }
1581                }
1582                break;
1583
1584            case e_regSetEXC:
1585                switch (reg)
1586                {
1587                case exc_trapno:    m_state.context.exc.__trapno = value->value.uint32;     success = true; break;
1588                case exc_err:       m_state.context.exc.__err = value->value.uint32;        success = true; break;
1589                case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
1590                }
1591                break;
1592        }
1593    }
1594
1595    if (success)
1596        return SetRegisterState(set) == KERN_SUCCESS;
1597    return false;
1598}
1599
1600
1601nub_size_t
1602DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
1603{
1604    nub_size_t size = sizeof (m_state.context);
1605
1606    if (buf && buf_len)
1607    {
1608        if (size > buf_len)
1609            size = buf_len;
1610
1611        bool force = false;
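        // KERN_SUCCESS is 0, so bitwise OR-ing the three results is a compact
        // way of asking "did any of the reads fail?"; on failure nothing is
        // copied out and 0 is returned.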
1612        if (GetGPRState(force) | GetFPUState(force) | GetEXCState(force))
1613            return 0;
1614        ::memcpy (buf, &m_state.context, size);
1615    }
1616    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1617    // Return the size of the register context even if NULL was passed in
1618    return size;
1619}
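// Illustrative usage sketch (the real callers live in the generic MachThread
// code): a client can call GetRegisterContext (NULL, 0) to learn the required
// buffer size, then call again with a buffer at least that large:
//
//   nub_size_t needed = arch.GetRegisterContext (NULL, 0);     // size query only
//   std::vector<uint8_t> buf (needed);
//   nub_size_t copied = arch.GetRegisterContext (&buf[0], buf.size());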
1620
1621nub_size_t
1622DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
1623{
1624    nub_size_t size = sizeof (m_state.context);
1625    if (buf == NULL || buf_len == 0)
1626        size = 0;
1627
1628    if (size)
1629    {
1630        if (size > buf_len)
1631            size = buf_len;
1632
1633        ::memcpy (&m_state.context, buf, size);
1634        SetGPRState();
1635        SetFPUState();
1636        SetEXCState();
1637    }
1638    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1639    return size;
1640}
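// Note that a buffer shorter than sizeof (m_state.context) only overwrites the
// leading portion of the cached context; the cached state for all three
// register sets is still written back via SetGPRState/SetFPUState/SetEXCState.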
1641
1642
1643kern_return_t
1644DNBArchImplX86_64::GetRegisterState(int set, bool force)
1645{
1646    switch (set)
1647    {
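            // For e_regSetALL the three kern_return_t values are OR'd
            // together: KERN_SUCCESS is 0, so the combined value is
            // KERN_SUCCESS only if every fetch succeeded (a non-zero result
            // signals failure but is not itself a meaningful error code).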
1648        case e_regSetALL:    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
1649        case e_regSetGPR:    return GetGPRState(force);
1650        case e_regSetFPU:    return GetFPUState(force);
1651        case e_regSetEXC:    return GetEXCState(force);
1652        default: break;
1653    }
1654    return KERN_INVALID_ARGUMENT;
1655}
1656
1657kern_return_t
1658DNBArchImplX86_64::SetRegisterState(int set)
1659{
1660    // Make sure we have a valid context to set.
1661    if (RegisterSetStateIsValid(set))
1662    {
1663        switch (set)
1664        {
1665            case e_regSetALL:    return SetGPRState() | SetFPUState() | SetEXCState();
1666            case e_regSetGPR:    return SetGPRState();
1667            case e_regSetFPU:    return SetFPUState();
1668            case e_regSetEXC:    return SetEXCState();
1669            default: break;
1670        }
1671    }
1672    return KERN_INVALID_ARGUMENT;
1673}
1674
1675bool
1676DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
1677{
1678    return m_state.RegsAreValid(set);
1679}
1680
1681
1682
1683#endif    // #if defined (__i386__) || defined (__x86_64__)
1684