DNBArchImplI386.cpp revision 3dd956eb7ee3e93df6b68a9c7c528e92359b6026
1//===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  Created by Greg Clayton on 6/25/07.
11//
12//===----------------------------------------------------------------------===//
13
14#if defined (__i386__) || defined (__x86_64__)
15
16#include <sys/cdefs.h>
17
18#include "MacOSX/i386/DNBArchImplI386.h"
19#include "DNBLog.h"
20#include "MachThread.h"
21#include "MachProcess.h"
22
23#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
24enum debugState {
25    debugStateUnknown,
26    debugStateOff,
27    debugStateOn
28};
29
30static debugState sFPUDebugState = debugStateUnknown;
31static debugState sAVXForceState = debugStateUnknown;
32
33static bool DebugFPURegs ()
34{
35    if (sFPUDebugState == debugStateUnknown)
36    {
37        if (getenv("DNB_DEBUG_FPU_REGS"))
38            sFPUDebugState = debugStateOn;
39        else
40            sFPUDebugState = debugStateOff;
41    }
42
43    return (sFPUDebugState == debugStateOn);
44}
45
46static bool ForceAVXRegs ()
47{
48    if (sAVXForceState == debugStateUnknown)
49    {
50        if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
51            sAVXForceState = debugStateOn;
52        else
53            sAVXForceState = debugStateOff;
54    }
55
56    return (sAVXForceState == debugStateOn);
57}
58
59#define DEBUG_FPU_REGS (DebugFPURegs())
60#define FORCE_AVX_REGS (ForceAVXRegs())
61#else
62#define DEBUG_FPU_REGS (0)
63#define FORCE_AVX_REGS (0)
64#endif
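// Note: DEBUG_FPU_REGS and FORCE_AVX_REGS are evaluated lazily and cached on
// first use, so the corresponding environment variables must already be set in
// debugserver's environment before the first register access. A usage sketch,
// assuming debugserver is launched from a shell:
//   env DNB_DEBUG_FPU_REGS=1 DNB_DEBUG_X86_FORCE_AVX_REGS=1 debugserver <args>
// In other build configurations both macros are hard-wired to 0 (see the #if
// guard above), so the environment variables have no effect there.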
65
66enum
67{
68    gpr_eax         = 0,
69    gpr_ebx         = 1,
70    gpr_ecx         = 2,
71    gpr_edx         = 3,
72    gpr_edi         = 4,
73    gpr_esi         = 5,
74    gpr_ebp         = 6,
75    gpr_esp         = 7,
76    gpr_ss          = 8,
77    gpr_eflags      = 9,
78    gpr_eip         = 10,
79    gpr_cs          = 11,
80    gpr_ds          = 12,
81    gpr_es          = 13,
82    gpr_fs          = 14,
83    gpr_gs          = 15,
84    k_num_gpr_regs
85};
86
87enum {
88    fpu_fcw,
89    fpu_fsw,
90    fpu_ftw,
91    fpu_fop,
92    fpu_ip,
93    fpu_cs,
94    fpu_dp,
95    fpu_ds,
96    fpu_mxcsr,
97    fpu_mxcsrmask,
98    fpu_stmm0,
99    fpu_stmm1,
100    fpu_stmm2,
101    fpu_stmm3,
102    fpu_stmm4,
103    fpu_stmm5,
104    fpu_stmm6,
105    fpu_stmm7,
106    fpu_xmm0,
107    fpu_xmm1,
108    fpu_xmm2,
109    fpu_xmm3,
110    fpu_xmm4,
111    fpu_xmm5,
112    fpu_xmm6,
113    fpu_xmm7,
114    fpu_ymm0,
115    fpu_ymm1,
116    fpu_ymm2,
117    fpu_ymm3,
118    fpu_ymm4,
119    fpu_ymm5,
120    fpu_ymm6,
121    fpu_ymm7,
122    k_num_fpu_regs,
123
124    // Aliases
125    fpu_fctrl = fpu_fcw,
126    fpu_fstat = fpu_fsw,
127    fpu_ftag  = fpu_ftw,
128    fpu_fiseg = fpu_cs,
129    fpu_fioff = fpu_ip,
130    fpu_foseg = fpu_ds,
131    fpu_fooff = fpu_dp
132};
133
134enum {
135    exc_trapno,
136    exc_err,
137    exc_faultvaddr,
138    k_num_exc_regs,
139};
140
141
142enum
143{
144    gcc_eax = 0,
145    gcc_ecx,
146    gcc_edx,
147    gcc_ebx,
148    gcc_ebp,
149    gcc_esp,
150    gcc_esi,
151    gcc_edi,
152    gcc_eip,
153    gcc_eflags
154};
155
156enum
157{
158    dwarf_eax = 0,
159    dwarf_ecx,
160    dwarf_edx,
161    dwarf_ebx,
162    dwarf_esp,
163    dwarf_ebp,
164    dwarf_esi,
165    dwarf_edi,
166    dwarf_eip,
167    dwarf_eflags,
168    dwarf_stmm0 = 11,
169    dwarf_stmm1,
170    dwarf_stmm2,
171    dwarf_stmm3,
172    dwarf_stmm4,
173    dwarf_stmm5,
174    dwarf_stmm6,
175    dwarf_stmm7,
176    dwarf_xmm0 = 21,
177    dwarf_xmm1,
178    dwarf_xmm2,
179    dwarf_xmm3,
180    dwarf_xmm4,
181    dwarf_xmm5,
182    dwarf_xmm6,
183    dwarf_xmm7,
184    dwarf_ymm0 = dwarf_xmm0,
185    dwarf_ymm1 = dwarf_xmm1,
186    dwarf_ymm2 = dwarf_xmm2,
187    dwarf_ymm3 = dwarf_xmm3,
188    dwarf_ymm4 = dwarf_xmm4,
189    dwarf_ymm5 = dwarf_xmm5,
190    dwarf_ymm6 = dwarf_xmm6,
191    dwarf_ymm7 = dwarf_xmm7,
192};
193
194enum
195{
196    gdb_eax        =  0,
197    gdb_ecx        =  1,
198    gdb_edx        =  2,
199    gdb_ebx        =  3,
200    gdb_esp        =  4,
201    gdb_ebp        =  5,
202    gdb_esi        =  6,
203    gdb_edi        =  7,
204    gdb_eip        =  8,
205    gdb_eflags     =  9,
206    gdb_cs         = 10,
207    gdb_ss         = 11,
208    gdb_ds         = 12,
209    gdb_es         = 13,
210    gdb_fs         = 14,
211    gdb_gs         = 15,
212    gdb_stmm0      = 16,
213    gdb_stmm1      = 17,
214    gdb_stmm2      = 18,
215    gdb_stmm3      = 19,
216    gdb_stmm4      = 20,
217    gdb_stmm5      = 21,
218    gdb_stmm6      = 22,
219    gdb_stmm7      = 23,
220    gdb_fctrl      = 24,    gdb_fcw     = gdb_fctrl,
221    gdb_fstat      = 25,    gdb_fsw     = gdb_fstat,
222    gdb_ftag       = 26,    gdb_ftw     = gdb_ftag,
223    gdb_fiseg      = 27,    gdb_fpu_cs  = gdb_fiseg,
224    gdb_fioff      = 28,    gdb_ip      = gdb_fioff,
225    gdb_foseg      = 29,    gdb_fpu_ds  = gdb_foseg,
226    gdb_fooff      = 30,    gdb_dp      = gdb_fooff,
227    gdb_fop        = 31,
228    gdb_xmm0       = 32,
229    gdb_xmm1       = 33,
230    gdb_xmm2       = 34,
231    gdb_xmm3       = 35,
232    gdb_xmm4       = 36,
233    gdb_xmm5       = 37,
234    gdb_xmm6       = 38,
235    gdb_xmm7       = 39,
236    gdb_mxcsr      = 40,
237    gdb_mm0        = 41,
238    gdb_mm1        = 42,
239    gdb_mm2        = 43,
240    gdb_mm3        = 44,
241    gdb_mm4        = 45,
242    gdb_mm5        = 46,
243    gdb_mm6        = 47,
244    gdb_mm7        = 48,
245    gdb_ymm0       = gdb_xmm0,
246    gdb_ymm1       = gdb_xmm1,
247    gdb_ymm2       = gdb_xmm2,
248    gdb_ymm3       = gdb_xmm3,
249    gdb_ymm4       = gdb_xmm4,
250    gdb_ymm5       = gdb_xmm5,
251    gdb_ymm6       = gdb_xmm6,
252    gdb_ymm7       = gdb_xmm7
253};
254
255// AVX support isn't working at all from user space, so disable it for now.
256enum DNBArchImplI386::AVXPresence DNBArchImplI386::s_has_avx = DNBArchImplI386::kAVXNotPresent;
257
258uint64_t
259DNBArchImplI386::GetPC(uint64_t failValue)
260{
261    // Get program counter
262    if (GetGPRState(false) == KERN_SUCCESS)
263        return m_state.context.gpr.__eip;
264    return failValue;
265}
266
267kern_return_t
268DNBArchImplI386::SetPC(uint64_t value)
269{
270    // Get program counter
271    kern_return_t err = GetGPRState(false);
272    if (err == KERN_SUCCESS)
273    {
274        m_state.context.gpr.__eip = value;
275        err = SetGPRState();
276    }
277    return err == KERN_SUCCESS;
278}
279
280uint64_t
281DNBArchImplI386::GetSP(uint64_t failValue)
282{
283    // Get stack pointer
284    if (GetGPRState(false) == KERN_SUCCESS)
285        return m_state.context.gpr.__esp;
286    return failValue;
287}
288
289// Uncomment the define below to verify the values in the debugger.
290//#define DEBUG_GPR_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
291//#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg
292
293kern_return_t
294DNBArchImplI386::GetGPRState(bool force)
295{
296    if (force || m_state.GetError(e_regSetGPR, Read))
297    {
298#if DEBUG_GPR_VALUES
299        SET_GPR(eax);
300        SET_GPR(ebx);
301        SET_GPR(ecx);
302        SET_GPR(edx);
303        SET_GPR(edi);
304        SET_GPR(esi);
305        SET_GPR(ebp);
306        SET_GPR(esp);
307        SET_GPR(ss);
308        SET_GPR(eflags);
309        SET_GPR(eip);
310        SET_GPR(cs);
311        SET_GPR(ds);
312        SET_GPR(es);
313        SET_GPR(fs);
314        SET_GPR(gs);
315        m_state.SetError(e_regSetGPR, Read, 0);
316#else
317        mach_msg_type_number_t count = e_regSetWordSizeGPR;
318        m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->ThreadID(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
319#endif
320    }
321    return m_state.GetError(e_regSetGPR, Read);
322}
323
324// Uncomment the define below to verify the values in the debugger.
325//#define DEBUG_FPU_VALUES 1    // DO NOT CHECK IN WITH THIS DEFINE ENABLED
326
327kern_return_t
328DNBArchImplI386::GetFPUState(bool force)
329{
330    if (force || m_state.GetError(e_regSetFPU, Read))
331    {
332        if (DEBUG_FPU_REGS)
333        {
334            if (CPUHasAVX() || FORCE_AVX_REGS)
335            {
336                m_state.context.fpu.avx.__fpu_reserved[0] = -1;
337                m_state.context.fpu.avx.__fpu_reserved[1] = -1;
338                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
339                *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
340                m_state.context.fpu.avx.__fpu_ftw = 1;
341                m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
342                m_state.context.fpu.avx.__fpu_fop = 2;
343                m_state.context.fpu.avx.__fpu_ip = 3;
344                m_state.context.fpu.avx.__fpu_cs = 4;
345                m_state.context.fpu.avx.__fpu_rsrv2 = 5;
346                m_state.context.fpu.avx.__fpu_dp = 6;
347                m_state.context.fpu.avx.__fpu_ds = 7;
348                m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
349                m_state.context.fpu.avx.__fpu_mxcsr = 8;
350                m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
351                int i;
352                for (i=0; i<16; ++i)
353                {
354                    if (i<10)
355                    {
356                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
357                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
358                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
359                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
360                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
361                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
362                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
363                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
364                    }
365                    else
366                    {
367                        m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
368                        m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
369                        m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
370                        m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
371                        m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
372                        m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
373                        m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
374                        m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
375                    }
376
377                    m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
378                    m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
379                    m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
380                    m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
381                    m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
382                    m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
383                    m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
384                    m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
385                }
386                for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
387                    m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
388                m_state.context.fpu.avx.__fpu_reserved1 = -1;
389                for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
390                    m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
391
392                for (i = 0; i < 16; ++i)
393                {
394                    m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
395                    m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
396                    m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
397                    m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
398                    m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
399                    m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
400                    m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
401                    m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
402                }
403            }
404            else
405            {
406                m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
407                m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
408                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
409                *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
410                m_state.context.fpu.no_avx.__fpu_ftw = 1;
411                m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
412                m_state.context.fpu.no_avx.__fpu_fop = 2;
413                m_state.context.fpu.no_avx.__fpu_ip = 3;
414                m_state.context.fpu.no_avx.__fpu_cs = 4;
415                m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
416                m_state.context.fpu.no_avx.__fpu_dp = 6;
417                m_state.context.fpu.no_avx.__fpu_ds = 7;
418                m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
419                m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
420                m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
421                int i;
422                for (i=0; i<16; ++i)
423                {
424                    if (i<10)
425                    {
426                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
427                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
428                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
429                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
430                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
431                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
432                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
433                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
434                    }
435                    else
436                    {
437                        m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
438                        m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
439                        m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
440                        m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
441                        m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
442                        m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
443                        m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
444                        m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
445                    }
446
447                    m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
448                    m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
449                    m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
450                    m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
451                    m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
452                    m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
453                    m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
454                    m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
455                }
456                for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
457                    m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
458                m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
459            }
460            m_state.SetError(e_regSetFPU, Read, 0);
461        }
462        else
463        {
464            if (CPUHasAVX() || FORCE_AVX_REGS)
465            {
466                mach_msg_type_number_t count = e_regSetWordSizeAVX;
467                m_state.SetError (e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
468                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x",
469                                  m_thread->ThreadID(), __i386_AVX_STATE, count, e_regSetWordSizeAVX,
470                                  m_state.GetError(e_regSetFPU, Read));
471            }
472            else
473            {
474                mach_msg_type_number_t count = e_regSetWordSizeFPU;
475                m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->ThreadID(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
476                DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in)) => 0x%8.8x",
477                                  m_thread->ThreadID(), __i386_FLOAT_STATE, count, e_regSetWordSizeFPU,
478                                  m_state.GetError(e_regSetFPU, Read));
479            }
480        }
481    }
482    return m_state.GetError(e_regSetFPU, Read);
483}
484
485kern_return_t
486DNBArchImplI386::GetEXCState(bool force)
487{
488    if (force || m_state.GetError(e_regSetEXC, Read))
489    {
490        mach_msg_type_number_t count = e_regSetWordSizeEXC;
491        m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->ThreadID(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
492    }
493    return m_state.GetError(e_regSetEXC, Read);
494}
495
496kern_return_t
497DNBArchImplI386::SetGPRState()
498{
499    m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->ThreadID(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
500    return m_state.GetError(e_regSetGPR, Write);
501}
502
503kern_return_t
504DNBArchImplI386::SetFPUState()
505{
506    if (DEBUG_FPU_REGS)
507    {
508        m_state.SetError(e_regSetFPU, Write, 0);
509        return m_state.GetError(e_regSetFPU, Write);
510    }
511    else
512    {
513        if (CPUHasAVX() || FORCE_AVX_REGS)
514            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
515        else
516            m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->ThreadID(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
517        return m_state.GetError(e_regSetFPU, Write);
518    }
519}
520
521kern_return_t
522DNBArchImplI386::SetEXCState()
523{
524    m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->ThreadID(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
525    return m_state.GetError(e_regSetEXC, Write);
526}
527
528kern_return_t
529DNBArchImplI386::GetDBGState(bool force)
530{
531    if (force || m_state.GetError(e_regSetDBG, Read))
532    {
533        mach_msg_type_number_t count = e_regSetWordSizeDBG;
534        m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->ThreadID(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
535    }
536    return m_state.GetError(e_regSetDBG, Read);
537}
538
539kern_return_t
540DNBArchImplI386::SetDBGState()
541{
542    m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->ThreadID(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
543    return m_state.GetError(e_regSetDBG, Write);
544}
545
546void
547DNBArchImplI386::ThreadWillResume()
548{
549    // Do we need to step this thread? If so, let the mach thread tell us so.
550    if (m_thread->IsStepping())
551    {
552        // This is the primary thread, let the arch do anything it needs
553        EnableHardwareSingleStep(true);
554    }
555
556    // Reset the debug status register, if necessary, before we resume.
557    kern_return_t kret = GetDBGState(false);
558    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
559    if (kret != KERN_SUCCESS)
560        return;
561
562    DBG &debug_state = m_state.context.dbg;
563    bool need_reset = false;
564    uint32_t i, num = NumSupportedHardwareWatchpoints();
565    for (i = 0; i < num; ++i)
566        if (IsWatchpointHit(debug_state, i))
567            need_reset = true;
568
569    if (need_reset)
570    {
571        ClearWatchpointHits(debug_state);
572        kret = SetDBGState();
573        DNBLogThreadedIf(LOG_WATCHPOINTS,"DNBArchImplI386::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
574    }
575}
576
577bool
578DNBArchImplI386::ThreadDidStop()
579{
580    bool success = true;
581
582    m_state.InvalidateAllRegisterStates();
583
584    // Are we stepping a single instruction?
585    if (GetGPRState(true) == KERN_SUCCESS)
586    {
587        // We are single stepping, was this the primary thread?
588        if (m_thread->IsStepping())
589        {
590            // This was the primary thread, we need to clear the trace
591            // bit if so.
592            success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
593        }
594        else
595        {
596            // The MachThread will automatically restore the suspend count
597            // in ThreadDidStop(), so we don't need to do anything here if
598            // we weren't the primary thread the last time
599        }
600    }
601    return success;
602}
603
604bool
605DNBArchImplI386::NotifyException(MachException::Data& exc)
606{
607    switch (exc.exc_type)
608    {
609    case EXC_BAD_ACCESS:
610        break;
611    case EXC_BAD_INSTRUCTION:
612        break;
613    case EXC_ARITHMETIC:
614        break;
615    case EXC_EMULATION:
616        break;
617    case EXC_SOFTWARE:
618        break;
619    case EXC_BREAKPOINT:
620        if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
621        {
622            // exc_code = EXC_I386_BPT
623            //
624            nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
625            if (pc != INVALID_NUB_ADDRESS && pc > 0)
626            {
627                pc -= 1;
628                // Check for a breakpoint at one byte prior to the current PC value
629                // since the PC will be just past the trap.
630
631                nub_break_t breakID = m_thread->Process()->Breakpoints().FindIDByAddress(pc);
632                if (NUB_BREAK_ID_IS_VALID(breakID))
633                {
634                    // Backup the PC for i386 since the trap was taken and the PC
635                    // is at the address following the single byte trap instruction.
636                    if (m_state.context.gpr.__eip > 0)
637                    {
638                        m_state.context.gpr.__eip = pc;
639                        // Write the new PC back out
640                        SetGPRState ();
641                    }
642                }
643                return true;
644            }
645        }
646        else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
647        {
648            // exc_code = EXC_I386_SGL
649            //
650            // Check whether this corresponds to a watchpoint hit event.
651            // If yes, set the exc_sub_code to the data break address.
652            nub_addr_t addr = 0;
653            uint32_t hw_index = GetHardwareWatchpointHit(addr);
654            if (hw_index != INVALID_NUB_HW_INDEX)
655            {
656                exc.exc_data[1] = addr;
657                // Piggyback the hw_index in the exc.data.
658                exc.exc_data.push_back(hw_index);
659            }
660
661            return true;
662        }
663        break;
664    case EXC_SYSCALL:
665        break;
666    case EXC_MACH_SYSCALL:
667        break;
668    case EXC_RPC_ALERT:
669        break;
670    }
671    return false;
672}
673
674uint32_t
675DNBArchImplI386::NumSupportedHardwareWatchpoints()
676{
677    // Available debug address registers: dr0, dr1, dr2, dr3.
678    return 4;
679}
680
681static uint32_t
682size_and_rw_bits(nub_size_t size, bool read, bool write)
683{
684    uint32_t rw;
685    if (read) {
686        rw = 0x3; // READ or READ/WRITE
687    } else if (write) {
688        rw = 0x1; // WRITE
689    } else {
690        assert(0 && "read and write cannot both be false");
691    }
692
693    switch (size) {
694    case 1:
695        return rw;
696    case 2:
697        return (0x1 << 2) | rw;
698    case 4:
699        return (0x3 << 2) | rw;
700    case 8:
701        return (0x2 << 2) | rw;
702    default:
703        assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
704    }
705}
706void
707DNBArchImplI386::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
708{
709    // Set both dr7 (debug control register) and dri (debug address register).
710
711    // dr7{7-0} encodes the local/global enable bits:
712    //  global enable --. .-- local enable
713    //                  | |
714    //                  v v
715    //      dr0 -> bits{1-0}
716    //      dr1 -> bits{3-2}
717    //      dr2 -> bits{5-4}
718    //      dr3 -> bits{7-6}
719    //
720    // dr7{31-16} encodes the rw/len bits:
721    //  b_x+3, b_x+2, b_x+1, b_x
722    //      where bits{x+1, x} => rw
723    //            0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
724    //      and bits{x+3, x+2} => len
725    //            0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
726    //
727    //      dr0 -> bits{19-16}
728    //      dr1 -> bits{23-20}
729    //      dr2 -> bits{27-24}
730    //      dr3 -> bits{31-28}
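    //
    // Worked example: a 4-byte, write-only watchpoint in slot 1 (hw_index == 1)
    // gives size_and_rw_bits(4, false, true) == (0x3 << 2) | 0x1 == 0xD, so the
    // statement below ORs (1 << 2) | (0xD << 20) == 0x00D00004 into dr7 (local
    // enable L1 plus the rw/len nibble for dr1), and dr1 receives the address.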
731    debug_state.__dr7 |= (1 << (2*hw_index) |
732                          size_and_rw_bits(size, read, write) << (16+4*hw_index));
733    uint32_t addr_32 = addr & 0xffffffff;
734    switch (hw_index) {
735    case 0:
736        debug_state.__dr0 = addr_32; break;
737    case 1:
738        debug_state.__dr1 = addr_32; break;
739    case 2:
740        debug_state.__dr2 = addr_32; break;
741    case 3:
742        debug_state.__dr3 = addr_32; break;
743    default:
744        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
745    }
746    return;
747}
748
749void
750DNBArchImplI386::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
751{
752    debug_state.__dr7 &= ~(3 << (2*hw_index));
753    switch (hw_index) {
754    case 0:
755        debug_state.__dr0 = 0; break;
756    case 1:
757        debug_state.__dr1 = 0; break;
758    case 2:
759        debug_state.__dr2 = 0; break;
760    case 3:
761        debug_state.__dr3 = 0; break;
762    default:
763        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
764    }
765    return;
766}
767
768bool
769DNBArchImplI386::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
770{
771    // Check dr7 (debug control register) for local/global enable bits:
772    //  global enable --. .-- local enable
773    //                  | |
774    //                  v v
775    //      dr0 -> bits{1-0}
776    //      dr1 -> bits{3-2}
777    //      dr2 -> bits{5-4}
778    //      dr3 -> bits{7-6}
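    //
    // For example, hw_index == 2 tests the mask (3 << 4) == 0x30, i.e. the
    // L2/G2 enable bits; the slot is vacant when both are clear.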
779    return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
780}
781
782// Resets the local copy of the debug status register to wait for the next debug exception.
783void
784DNBArchImplI386::ClearWatchpointHits(DBG &debug_state)
785{
786    // See also IsWatchpointHit().
787    debug_state.__dr6 = 0;
788    return;
789}
790
791bool
792DNBArchImplI386::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
793{
794    // Check dr6 (debug status register) whether a watchpoint hits:
795    //          is watchpoint hit?
796    //                  |
797    //                  v
798    //      dr0 -> bits{0}
799    //      dr1 -> bits{1}
800    //      dr2 -> bits{2}
801    //      dr3 -> bits{3}
802    return (debug_state.__dr6 & (1 << hw_index));
803}
804
805nub_addr_t
806DNBArchImplI386::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
807{
808    switch (hw_index) {
809    case 0:
810        return debug_state.__dr0;
811    case 1:
812        return debug_state.__dr1;
813    case 2:
814        return debug_state.__dr2;
815    case 3:
816        return debug_state.__dr3;
817    default:
818        assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
819    }
820}
821
822uint32_t
823DNBArchImplI386::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write)
824{
825    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(addr = %8.8p, size = %u, read = %u, write = %u)", addr, size, read, write);
826
827    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
828
829    // Can only watch 1, 2, 4, or 8 bytes.
830    if (!(size == 1 || size == 2 || size == 4 || size == 8))
831        return INVALID_NUB_HW_INDEX;
832
833    // We must watch for either read or write
834    if (read == false && write == false)
835        return INVALID_NUB_HW_INDEX;
836
837    // Read the debug state
838    kern_return_t kret = GetDBGState(false);
839
840    if (kret == KERN_SUCCESS)
841    {
842        // Check to make sure we have the needed hardware support
843        uint32_t i = 0;
844
845        DBG &debug_state = m_state.context.dbg;
846        for (i = 0; i < num_hw_watchpoints; ++i)
847        {
848            if (IsWatchpointVacant(debug_state, i))
849                break;
850        }
851
852        // See if we found an available hw breakpoint slot above
853        if (i < num_hw_watchpoints)
854        {
855            // Modify our local copy of the debug state, first.
856            SetWatchpoint(debug_state, i, addr, size, read, write);
857            // Now set the watch point in the inferior.
858            kret = SetDBGState();
859            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
860
861            if (kret == KERN_SUCCESS)
862                return i;
863        }
864        else
865        {
866            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
867        }
868    }
869    return INVALID_NUB_HW_INDEX;
870}
871
872bool
873DNBArchImplI386::DisableHardwareWatchpoint (uint32_t hw_index)
874{
875    kern_return_t kret = GetDBGState(false);
876
877    const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
878    if (kret == KERN_SUCCESS)
879    {
880        DBG &debug_state = m_state.context.dbg;
881        if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
882        {
883            // Modify our local copy of the debug state, first.
884            ClearWatchpoint(debug_state, hw_index);
885            // Now disable the watch point in the inferior.
886            kret = SetDBGState();
887            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::DisableHardwareWatchpoint( %u )",
888                             hw_index);
889
890            if (kret == KERN_SUCCESS)
891                return true;
892        }
893    }
894    return false;
895}
896
897DNBArchImplI386::DBG DNBArchImplI386::Global_Debug_State = {0,0,0,0,0,0,0,0};
898bool DNBArchImplI386::Valid_Global_Debug_State = false;
899
900// Use this callback from MachThread, which in turn is called from MachThreadList, to update
901// the global view of the hardware watchpoint state, so that when a new thread comes along, it
902// inherits the existing hardware watchpoint state.
903void
904DNBArchImplI386::HardwareWatchpointStateChanged ()
905{
906    Global_Debug_State = m_state.context.dbg;
907    Valid_Global_Debug_State = true;
908}
909
910// Iterate through the debug status register; return the index of the first hit.
911uint32_t
912DNBArchImplI386::GetHardwareWatchpointHit(nub_addr_t &addr)
913{
914    // Read the debug state
915    kern_return_t kret = GetDBGState(false);
916    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
917    if (kret == KERN_SUCCESS)
918    {
919        DBG &debug_state = m_state.context.dbg;
920        uint32_t i, num = NumSupportedHardwareWatchpoints();
921        for (i = 0; i < num; ++i)
922        {
923            if (IsWatchpointHit(debug_state, i))
924            {
925                addr = GetWatchAddress(debug_state, i);
926                DNBLogThreadedIf(LOG_WATCHPOINTS,
927                                 "DNBArchImplI386::GetHardwareWatchpointHit() found => %u (addr = %8.8p).",
928                                 i, addr);
929                return i;
930            }
931        }
932    }
933    return INVALID_NUB_HW_INDEX;
934}
935
936// Set the single step bit in the processor status register.
937kern_return_t
938DNBArchImplI386::EnableHardwareSingleStep (bool enable)
939{
940    if (GetGPRState(false) == KERN_SUCCESS)
941    {
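        // 0x100 is the TF (trap flag) bit, bit 8 of EFLAGS: while it is set the
        // CPU raises a single-step debug exception after each instruction.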
942        const uint32_t trace_bit = 0x100u;
943        if (enable)
944            m_state.context.gpr.__eflags |= trace_bit;
945        else
946            m_state.context.gpr.__eflags &= ~trace_bit;
947        return SetGPRState();
948    }
949    return m_state.GetError(e_regSetGPR, Read);
950}
951
952
953//----------------------------------------------------------------------
954// Register information definitions
955//----------------------------------------------------------------------
956
957
958#define GPR_OFFSET(reg) (offsetof (DNBArchImplI386::GPR, __##reg))
959#define FPU_OFFSET(reg) (offsetof (DNBArchImplI386::FPU, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.no_avx))
960#define AVX_OFFSET(reg) (offsetof (DNBArchImplI386::AVX, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.avx))
961#define EXC_OFFSET(reg) (offsetof (DNBArchImplI386::EXC, __##reg)     + offsetof (DNBArchImplI386::Context, exc))
962
963#define GPR_SIZE(reg)       (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg))
964#define FPU_SIZE_UINT(reg)  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg))
965#define FPU_SIZE_MMST(reg)  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg))
966#define FPU_SIZE_XMM(reg)   (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg))
967#define FPU_SIZE_YMM(reg)   (32)
968#define EXC_SIZE(reg)       (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg))
969
970// This does not accurately identify the location of ymm0...7 in
971// Context.fpu.avx.  That is because there is a bunch of padding
972// in Context.fpu.avx that we don't need.  Offset macros lay out
973// the register state that Debugserver transmits to the debugger
974// -- not to interpret the thread_get_state info.
975#define AVX_OFFSET_YMM(n)   (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))
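// For example, AVX_OFFSET_YMM(0) places ymm0 immediately after xmm7's 16 bytes
// in the register blob transmitted to the debugger, and each subsequent ymm
// register occupies the following 32 bytes (AVX_OFFSET_YMM(1) is 32 bytes later).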
976
977// These macros will auto define the register name, alt name, register size,
978// register offset, encoding, format and native register. This ensures that
979// the register state structures are defined correctly and have the correct
980// sizes and offsets.
981
982// General purpose registers for i386 (32 bit)
983const DNBRegisterInfo
984DNBArchImplI386::g_gpr_registers[] =
985{
986{ e_regSetGPR, gpr_eax,     "eax"   , NULL      , Uint, Hex, GPR_SIZE(eax),     GPR_OFFSET(eax)     , gcc_eax   , dwarf_eax     , -1                    , gdb_eax   },
987{ e_regSetGPR, gpr_ebx,     "ebx"   , NULL      , Uint, Hex, GPR_SIZE(ebx),     GPR_OFFSET(ebx)     , gcc_ebx   , dwarf_ebx     , -1                    , gdb_ebx   },
988{ e_regSetGPR, gpr_ecx,     "ecx"   , NULL      , Uint, Hex, GPR_SIZE(ecx),     GPR_OFFSET(ecx)     , gcc_ecx   , dwarf_ecx     , -1                    , gdb_ecx   },
989{ e_regSetGPR, gpr_edx,     "edx"   , NULL      , Uint, Hex, GPR_SIZE(edx),     GPR_OFFSET(edx)     , gcc_edx   , dwarf_edx     , -1                    , gdb_edx   },
990{ e_regSetGPR, gpr_edi,     "edi"   , NULL      , Uint, Hex, GPR_SIZE(edi),     GPR_OFFSET(edi)     , gcc_edi   , dwarf_edi     , -1                    , gdb_edi   },
991{ e_regSetGPR, gpr_esi,     "esi"   , NULL      , Uint, Hex, GPR_SIZE(esi),     GPR_OFFSET(esi)     , gcc_esi   , dwarf_esi     , -1                    , gdb_esi   },
992{ e_regSetGPR, gpr_ebp,     "ebp"   , "fp"      , Uint, Hex, GPR_SIZE(ebp),     GPR_OFFSET(ebp)     , gcc_ebp   , dwarf_ebp     , GENERIC_REGNUM_FP     , gdb_ebp   },
993{ e_regSetGPR, gpr_esp,     "esp"   , "sp"      , Uint, Hex, GPR_SIZE(esp),     GPR_OFFSET(esp)     , gcc_esp   , dwarf_esp     , GENERIC_REGNUM_SP     , gdb_esp   },
994{ e_regSetGPR, gpr_ss,      "ss"    , NULL      , Uint, Hex, GPR_SIZE(ss),      GPR_OFFSET(ss)      , -1        , -1            , -1                    , gdb_ss    },
995{ e_regSetGPR, gpr_eflags,  "eflags", "flags"   , Uint, Hex, GPR_SIZE(eflags),  GPR_OFFSET(eflags)  , gcc_eflags, dwarf_eflags  , GENERIC_REGNUM_FLAGS  , gdb_eflags},
996{ e_regSetGPR, gpr_eip,     "eip"   , "pc"      , Uint, Hex, GPR_SIZE(eip),     GPR_OFFSET(eip)     , gcc_eip   , dwarf_eip     , GENERIC_REGNUM_PC     , gdb_eip   },
997{ e_regSetGPR, gpr_cs,      "cs"    , NULL      , Uint, Hex, GPR_SIZE(cs),      GPR_OFFSET(cs)      , -1        , -1            , -1                    , gdb_cs    },
998{ e_regSetGPR, gpr_ds,      "ds"    , NULL      , Uint, Hex, GPR_SIZE(ds),      GPR_OFFSET(ds)      , -1        , -1            , -1                    , gdb_ds    },
999{ e_regSetGPR, gpr_es,      "es"    , NULL      , Uint, Hex, GPR_SIZE(es),      GPR_OFFSET(es)      , -1        , -1            , -1                    , gdb_es    },
1000{ e_regSetGPR, gpr_fs,      "fs"    , NULL      , Uint, Hex, GPR_SIZE(fs),      GPR_OFFSET(fs)      , -1        , -1            , -1                    , gdb_fs    },
1001{ e_regSetGPR, gpr_gs,      "gs"    , NULL      , Uint, Hex, GPR_SIZE(gs),      GPR_OFFSET(gs)      , -1        , -1            , -1                    , gdb_gs    }
1002};
1003
1004
1005const DNBRegisterInfo
1006DNBArchImplI386::g_fpu_registers_no_avx[] =
1007{
1008{ e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , FPU_OFFSET(fcw)       , -1, -1, -1, -1 },
1009{ e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , FPU_OFFSET(fsw)       , -1, -1, -1, -1 },
1010{ e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , FPU_OFFSET(ftw)       , -1, -1, -1, -1 },
1011{ e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , FPU_OFFSET(fop)       , -1, -1, -1, -1 },
1012{ e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , FPU_OFFSET(ip)        , -1, -1, -1, -1 },
1013{ e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , FPU_OFFSET(cs)        , -1, -1, -1, -1 },
1014{ e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , FPU_OFFSET(dp)        , -1, -1, -1, -1 },
1015{ e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , FPU_OFFSET(ds)        , -1, -1, -1, -1 },
1016{ e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , FPU_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1017{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1018
1019{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), -1, dwarf_stmm0, -1, gdb_stmm0 },
1020{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), -1, dwarf_stmm1, -1, gdb_stmm1 },
1021{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), -1, dwarf_stmm2, -1, gdb_stmm2 },
1022{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), -1, dwarf_stmm3, -1, gdb_stmm3 },
1023{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), -1, dwarf_stmm4, -1, gdb_stmm4 },
1024{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), -1, dwarf_stmm5, -1, gdb_stmm5 },
1025{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), -1, dwarf_stmm6, -1, gdb_stmm6 },
1026{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), -1, dwarf_stmm7, -1, gdb_stmm7 },
1027
1028{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), -1, dwarf_xmm0, -1, gdb_xmm0 },
1029{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), -1, dwarf_xmm1, -1, gdb_xmm1 },
1030{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), -1, dwarf_xmm2, -1, gdb_xmm2 },
1031{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), -1, dwarf_xmm3, -1, gdb_xmm3 },
1032{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), -1, dwarf_xmm4, -1, gdb_xmm4 },
1033{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), -1, dwarf_xmm5, -1, gdb_xmm5 },
1034{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), -1, dwarf_xmm6, -1, gdb_xmm6 },
1035{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), -1, dwarf_xmm7, -1, gdb_xmm7 }
1036};
1037
1038const DNBRegisterInfo
1039DNBArchImplI386::g_fpu_registers_avx[] =
1040{
1041{ e_regSetFPU, fpu_fcw      , "fctrl"       , NULL, Uint, Hex, FPU_SIZE_UINT(fcw)       , AVX_OFFSET(fcw)       , -1, -1, -1, -1 },
1042{ e_regSetFPU, fpu_fsw      , "fstat"       , NULL, Uint, Hex, FPU_SIZE_UINT(fsw)       , AVX_OFFSET(fsw)       , -1, -1, -1, -1 },
1043{ e_regSetFPU, fpu_ftw      , "ftag"        , NULL, Uint, Hex, FPU_SIZE_UINT(ftw)       , AVX_OFFSET(ftw)       , -1, -1, -1, -1 },
1044{ e_regSetFPU, fpu_fop      , "fop"         , NULL, Uint, Hex, FPU_SIZE_UINT(fop)       , AVX_OFFSET(fop)       , -1, -1, -1, -1 },
1045{ e_regSetFPU, fpu_ip       , "fioff"       , NULL, Uint, Hex, FPU_SIZE_UINT(ip)        , AVX_OFFSET(ip)        , -1, -1, -1, -1 },
1046{ e_regSetFPU, fpu_cs       , "fiseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(cs)        , AVX_OFFSET(cs)        , -1, -1, -1, -1 },
1047{ e_regSetFPU, fpu_dp       , "fooff"       , NULL, Uint, Hex, FPU_SIZE_UINT(dp)        , AVX_OFFSET(dp)        , -1, -1, -1, -1 },
1048{ e_regSetFPU, fpu_ds       , "foseg"       , NULL, Uint, Hex, FPU_SIZE_UINT(ds)        , AVX_OFFSET(ds)        , -1, -1, -1, -1 },
1049{ e_regSetFPU, fpu_mxcsr    , "mxcsr"       , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr)     , AVX_OFFSET(mxcsr)     , -1, -1, -1, -1 },
1050{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask"   , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1, -1, -1, -1 },
1051
1052{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), -1, dwarf_stmm0, -1, gdb_stmm0 },
1053{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), -1, dwarf_stmm1, -1, gdb_stmm1 },
1054{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), -1, dwarf_stmm2, -1, gdb_stmm2 },
1055{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), -1, dwarf_stmm3, -1, gdb_stmm3 },
1056{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), -1, dwarf_stmm4, -1, gdb_stmm4 },
1057{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), -1, dwarf_stmm5, -1, gdb_stmm5 },
1058{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), -1, dwarf_stmm6, -1, gdb_stmm6 },
1059{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), -1, dwarf_stmm7, -1, gdb_stmm7 },
1060
1061{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), AVX_OFFSET(xmm0), -1, dwarf_xmm0, -1, gdb_xmm0 },
1062{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), AVX_OFFSET(xmm1), -1, dwarf_xmm1, -1, gdb_xmm1 },
1063{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), AVX_OFFSET(xmm2), -1, dwarf_xmm2, -1, gdb_xmm2 },
1064{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), AVX_OFFSET(xmm3), -1, dwarf_xmm3, -1, gdb_xmm3 },
1065{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), AVX_OFFSET(xmm4), -1, dwarf_xmm4, -1, gdb_xmm4 },
1066{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), AVX_OFFSET(xmm5), -1, dwarf_xmm5, -1, gdb_xmm5 },
1067{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), AVX_OFFSET(xmm6), -1, dwarf_xmm6, -1, gdb_xmm6 },
1068{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), AVX_OFFSET(xmm7), -1, dwarf_xmm7, -1, gdb_xmm7 },
1069
1070{ e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), -1, dwarf_ymm0, -1, gdb_ymm0 },
1071{ e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), -1, dwarf_ymm1, -1, gdb_ymm1 },
1072{ e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), -1, dwarf_ymm2, -1, gdb_ymm2 },
1073{ e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), -1, dwarf_ymm3, -1, gdb_ymm3 },
1074{ e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), -1, dwarf_ymm4, -1, gdb_ymm4 },
1075{ e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), -1, dwarf_ymm5, -1, gdb_ymm5 },
1076{ e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), -1, dwarf_ymm6, -1, gdb_ymm6 },
1077{ e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), -1, dwarf_ymm7, -1, gdb_ymm7 },
1078};
1079
1080const DNBRegisterInfo
1081DNBArchImplI386::g_exc_registers[] =
1082{
1083{ e_regSetEXC, exc_trapno,      "trapno"    , NULL, Uint, Hex, EXC_SIZE (trapno)    , EXC_OFFSET (trapno)       , -1, -1, -1, -1 },
1084{ e_regSetEXC, exc_err,         "err"       , NULL, Uint, Hex, EXC_SIZE (err)       , EXC_OFFSET (err)          , -1, -1, -1, -1 },
1085{ e_regSetEXC, exc_faultvaddr,  "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr)   , -1, -1, -1, -1 }
1086};
1087
1088// Number of registers in each register set
1089const size_t DNBArchImplI386::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
1090const size_t DNBArchImplI386::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
1091const size_t DNBArchImplI386::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
1092const size_t DNBArchImplI386::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
1093const size_t DNBArchImplI386::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
1094const size_t DNBArchImplI386::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
1095
1096//----------------------------------------------------------------------
1097// Register set definitions. The first definition, at register set index
1098// zero, covers all registers and is followed by the other register sets.
1099// The register information for the all-registers set need not be filled in.
1100//----------------------------------------------------------------------
1101const DNBRegisterSetInfo
1102DNBArchImplI386::g_reg_sets_no_avx[] =
1103{
1104    { "i386 Registers",             NULL,                   k_num_all_registers_no_avx },
1105    { "General Purpose Registers",  g_gpr_registers,        k_num_gpr_registers        },
1106    { "Floating Point Registers",   g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
1107    { "Exception State Registers",  g_exc_registers,        k_num_exc_registers        }
1108};
1109
1110const DNBRegisterSetInfo
1111DNBArchImplI386::g_reg_sets_avx[] =
1112{
1113    { "i386 Registers",             NULL,                   k_num_all_registers_avx },
1114    { "General Purpose Registers",  g_gpr_registers,        k_num_gpr_registers     },
1115    { "Floating Point Registers",   g_fpu_registers_avx,    k_num_fpu_registers_avx },
1116    { "Exception State Registers",  g_exc_registers,        k_num_exc_registers     }
1117};
1118
1119// Total number of register sets for this architecture
1120const size_t DNBArchImplI386::k_num_register_sets = sizeof(g_reg_sets_no_avx)/sizeof(DNBRegisterSetInfo);
1121
1122DNBArchProtocol *
1123DNBArchImplI386::Create (MachThread *thread)
1124{
1125    DNBArchImplI386 *obj = new DNBArchImplI386 (thread);
1126
1127    // When a new thread comes along, it inherits the global debug state, if that state is valid.
1128    if (Valid_Global_Debug_State)
1129    {
1130        obj->m_state.context.dbg = Global_Debug_State;
1131        kern_return_t kret = obj->SetDBGState();
1132        DNBLogThreadedIf(LOG_WATCHPOINTS,
1133                         "DNBArchImplI386::Create() Inherit and SetDBGState() => 0x%8.8x.", kret);
1134    }
1135    return obj;
1136}
1137
1138const uint8_t * const
1139DNBArchImplI386::SoftwareBreakpointOpcode (nub_size_t byte_size)
1140{
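    // 0xCC encodes the one-byte INT3 instruction, the standard x86 software
    // breakpoint opcode.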
1141    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
1142    if (byte_size == 1)
1143        return g_breakpoint_opcode;
1144    return NULL;
1145}
1146
1147const DNBRegisterSetInfo *
1148DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets)
1149{
1150    *num_reg_sets = k_num_register_sets;
1151    if (CPUHasAVX() || FORCE_AVX_REGS)
1152        return g_reg_sets_avx;
1153    else
1154        return g_reg_sets_no_avx;
1155}
1156
1157
1158void
1159DNBArchImplI386::Initialize()
1160{
1161    DNBArchPluginInfo arch_plugin_info =
1162    {
1163        CPU_TYPE_I386,
1164        DNBArchImplI386::Create,
1165        DNBArchImplI386::GetRegisterSetInfo,
1166        DNBArchImplI386::SoftwareBreakpointOpcode
1167    };
1168
1169    // Register this arch plug-in with the main protocol class
1170    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
1171}
1172
1173bool
1174DNBArchImplI386::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
1175{
1176    if (set == REGISTER_SET_GENERIC)
1177    {
1178        switch (reg)
1179        {
1180        case GENERIC_REGNUM_PC:     // Program Counter
1181            set = e_regSetGPR;
1182            reg = gpr_eip;
1183            break;
1184
1185        case GENERIC_REGNUM_SP:     // Stack Pointer
1186            set = e_regSetGPR;
1187            reg = gpr_esp;
1188            break;
1189
1190        case GENERIC_REGNUM_FP:     // Frame Pointer
1191            set = e_regSetGPR;
1192            reg = gpr_ebp;
1193            break;
1194
1195        case GENERIC_REGNUM_FLAGS:  // Processor flags register
1196            set = e_regSetGPR;
1197            reg = gpr_eflags;
1198            break;
1199
1200        case GENERIC_REGNUM_RA:     // Return Address
1201        default:
1202            return false;
1203        }
1204    }
1205
1206    if (GetRegisterState(set, false) != KERN_SUCCESS)
1207        return false;
1208
1209    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1210    if (regInfo)
1211    {
1212        value->info = *regInfo;
1213        switch (set)
1214        {
1215        case e_regSetGPR:
1216            if (reg < k_num_gpr_registers)
1217            {
1218                value->value.uint32 = ((uint32_t*)(&m_state.context.gpr))[reg];
1219                return true;
1220            }
1221            break;
1222
1223        case e_regSetFPU:
1224            if (CPUHasAVX() || FORCE_AVX_REGS)
1225            {
1226                switch (reg)
1227                {
1228                case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));    return true;
1229                case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));    return true;
1230                case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw;                      return true;
1231                case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;                      return true;
1232                case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;                       return true;
1233                case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;                       return true;
1234                case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;                       return true;
1235                case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;                       return true;
1236                case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;                    return true;
1237                case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;                return true;
1238
1239                case fpu_stmm0:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, 10);    return true;
1240                case fpu_stmm1:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, 10);    return true;
1241                case fpu_stmm2:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, 10);    return true;
1242                case fpu_stmm3:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, 10);    return true;
1243                case fpu_stmm4:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, 10);    return true;
1244                case fpu_stmm5:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 10);    return true;
1245                case fpu_stmm6:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, 10);    return true;
1246                case fpu_stmm7:     memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, 10);    return true;
1247
1248                case fpu_xmm0:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, 16);    return true;
1249                case fpu_xmm1:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, 16);    return true;
1250                case fpu_xmm2:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, 16);    return true;
1251                case fpu_xmm3:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, 16);    return true;
1252                case fpu_xmm4:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, 16);    return true;
1253                case fpu_xmm5:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, 16);    return true;
1254                case fpu_xmm6:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, 16);    return true;
1255                case fpu_xmm7:      memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, 16);    return true;
1256
1257#define MEMCPY_YMM(n)                                                                           \
1258    memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, 16);            \
1259    memcpy((&value->value.uint8) + 16, m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16);
1260                case fpu_ymm0:      MEMCPY_YMM(0);  return true;
1261                case fpu_ymm1:      MEMCPY_YMM(1);  return true;
1262                case fpu_ymm2:      MEMCPY_YMM(2);  return true;
1263                case fpu_ymm3:      MEMCPY_YMM(3);  return true;
1264                case fpu_ymm4:      MEMCPY_YMM(4);  return true;
1265                case fpu_ymm5:      MEMCPY_YMM(5);  return true;
1266                case fpu_ymm6:      MEMCPY_YMM(6);  return true;
1267                case fpu_ymm7:      MEMCPY_YMM(7);  return true;
1268#undef MEMCPY_YMM
1269                }
1270            }
1271            else
1272            {
1273                switch (reg)
1274                {
1275                case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));    return true;
1276                case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));    return true;
1277                case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw;                      return true;
1278                case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;                      return true;
1279                case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;                       return true;
1280                case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;                       return true;
1281                case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;                       return true;
1282                case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;                       return true;
1283                case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;                    return true;
1284                case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;                return true;
1285
1286                case fpu_stmm0:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10);    return true;
1287                case fpu_stmm1:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10);    return true;
1288                case fpu_stmm2:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10);    return true;
1289                case fpu_stmm3:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10);    return true;
1290                case fpu_stmm4:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10);    return true;
1291                case fpu_stmm5:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10);    return true;
1292                case fpu_stmm6:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10);    return true;
1293                case fpu_stmm7:     memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10);    return true;
1294
1295                case fpu_xmm0:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16);    return true;
1296                case fpu_xmm1:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16);    return true;
1297                case fpu_xmm2:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16);    return true;
1298                case fpu_xmm3:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16);    return true;
1299                case fpu_xmm4:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16);    return true;
1300                case fpu_xmm5:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16);    return true;
1301                case fpu_xmm6:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16);    return true;
1302                case fpu_xmm7:      memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16);    return true;
1303                }
1304            }
1305            break;
1306
1307        case e_regSetEXC:
1308            if (reg < k_num_exc_registers)
1309            {
1310                value->value.uint32 = (&m_state.context.exc.__trapno)[reg];
1311                return true;
1312            }
1313            break;
1314        }
1315    }
1316    return false;
1317}
1318
1319
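// Write a single register value into the cached thread state. Generic register
// numbers (PC, SP, FP, flags) are first translated to their i386 equivalents,
// and a successful write is flushed back to the thread via SetRegisterState().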
1320bool
1321DNBArchImplI386::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
1322{
1323    if (set == REGISTER_SET_GENERIC)
1324    {
1325        switch (reg)
1326        {
1327        case GENERIC_REGNUM_PC:     // Program Counter
1328            set = e_regSetGPR;
1329            reg = gpr_eip;
1330            break;
1331
1332        case GENERIC_REGNUM_SP:     // Stack Pointer
1333            set = e_regSetGPR;
1334            reg = gpr_esp;
1335            break;
1336
1337        case GENERIC_REGNUM_FP:     // Frame Pointer
1338            set = e_regSetGPR;
1339            reg = gpr_ebp;
1340            break;
1341
1342        case GENERIC_REGNUM_FLAGS:  // Processor flags register
1343            set = e_regSetGPR;
1344            reg = gpr_eflags;
1345            break;
1346
1347        case GENERIC_REGNUM_RA:     // Return Address
1348        default:
1349            return false;
1350        }
1351    }
1352
1353    if (GetRegisterState(set, false) != KERN_SUCCESS)
1354        return false;
1355
1356    bool success = false;
1357    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1358    if (regInfo)
1359    {
1360        switch (set)
1361        {
1362        case e_regSetGPR:
1363            if (reg < k_num_gpr_registers)
1364            {
1365                ((uint32_t*)(&m_state.context.gpr))[reg] = value->value.uint32;
1366                success = true;
1367            }
1368            break;
1369
1370        case e_regSetFPU:
1371            if (CPUHasAVX() || FORCE_AVX_REGS)
1372            {
1373                switch (reg)
1374                {
1375                case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1376                case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1377                case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1378                case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16;                      success = true; break;
1379                case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32;                       success = true; break;
1380                case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16;                       success = true; break;
1381                case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32;                       success = true; break;
1382                case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16;                       success = true; break;
1383                case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1384                case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1385
1386                case fpu_stmm0:     memcpy (m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1387                case fpu_stmm1:     memcpy (m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1388                case fpu_stmm2:     memcpy (m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1389                case fpu_stmm3:     memcpy (m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1390                case fpu_stmm4:     memcpy (m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1391                case fpu_stmm5:     memcpy (m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1392                case fpu_stmm6:     memcpy (m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1393                case fpu_stmm7:     memcpy (m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1394
1395                case fpu_xmm0:      memcpy(m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1396                case fpu_xmm1:      memcpy(m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1397                case fpu_xmm2:      memcpy(m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1398                case fpu_xmm3:      memcpy(m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1399                case fpu_xmm4:      memcpy(m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1400                case fpu_xmm5:      memcpy(m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1401                case fpu_xmm6:      memcpy(m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1402                case fpu_xmm7:      memcpy(m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1403
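// Writing a YMM register is the mirror image of the read path: the low 16 bytes
// of the incoming value go into the XMM slot and the upper 16 bytes into the
// matching YMMH slot.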
1404#define MEMCPY_YMM(n)                                                                           \
1405    memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, 16);            \
1406    memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, (&value->value.uint8) + 16, 16);
1407                case fpu_ymm0:      MEMCPY_YMM(0);  success = true; break;
1408                case fpu_ymm1:      MEMCPY_YMM(1);  success = true; break;
1409                case fpu_ymm2:      MEMCPY_YMM(2);  success = true; break;
1410                case fpu_ymm3:      MEMCPY_YMM(3);  success = true; break;
1411                case fpu_ymm4:      MEMCPY_YMM(4);  success = true; break;
1412                case fpu_ymm5:      MEMCPY_YMM(5);  success = true; break;
1413                case fpu_ymm6:      MEMCPY_YMM(6);  success = true; break;
1414                case fpu_ymm7:      MEMCPY_YMM(7);  success = true; break;
1415#undef MEMCPY_YMM
1416                }
1417            }
1418            else
1419            {
1420                switch (reg)
1421                {
1422                case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16;    success = true; break;
1423                case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16;    success = true; break;
1424                case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;                       success = true; break;
1425                case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;                      success = true; break;
1426                case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;                       success = true; break;
1427                case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;                       success = true; break;
1428                case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;                       success = true; break;
1429                case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;                       success = true; break;
1430                case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;                    success = true; break;
1431                case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;                success = true; break;
1432
1433                case fpu_stmm0:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1434                case fpu_stmm1:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1435                case fpu_stmm2:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1436                case fpu_stmm3:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1437                case fpu_stmm4:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1438                case fpu_stmm5:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1439                case fpu_stmm6:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1440                case fpu_stmm7:     memcpy (m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10);    success = true; break;
1441
1442                case fpu_xmm0:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1443                case fpu_xmm1:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1444                case fpu_xmm2:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1445                case fpu_xmm3:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1446                case fpu_xmm4:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1447                case fpu_xmm5:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1448                case fpu_xmm6:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1449                case fpu_xmm7:      memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16);    success = true; break;
1450                }
1451            }
1452            break;
1453
1454        case e_regSetEXC:
1455            if (reg < k_num_exc_registers)
1456            {
1457                (&m_state.context.exc.__trapno)[reg] = value->value.uint32;
1458                success = true;
1459            }
1460            break;
1461        }
1462    }
1463
1464    if (success)
1465        return SetRegisterState(set) == KERN_SUCCESS;
1466    return false;
1467}
1468
1469
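// Copy the whole cached register context (GPR, FPU/AVX and EXC sets) into the
// caller's buffer. A NULL buffer or zero length simply reports the context size;
// if any register set fails to read, zero is returned instead.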
1470nub_size_t
1471DNBArchImplI386::GetRegisterContext (void *buf, nub_size_t buf_len)
1472{
1473    nub_size_t size = sizeof (m_state.context);
1474
1475    if (buf && buf_len)
1476    {
1477        if (size > buf_len)
1478            size = buf_len;
1479
1480        bool force = false;
1481        kern_return_t kret;
1482        if ((kret = GetGPRState(force)) != KERN_SUCCESS)
1483        {
1484            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to read: %u", buf, buf_len, kret);
1485            size = 0;
1486        }
1487        else if ((kret = GetFPUState(force)) != KERN_SUCCESS)
1488        {
1489            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %zu) error: %s regs failed to read: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1490            size = 0;
1491        }
1492        else if ((kret = GetEXCState(force)) != KERN_SUCCESS)
1493        {
1494            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to read: %u", buf, buf_len, kret);
1495            size = 0;
1496        }
1497        else
1498        {
1499            // Success
1500            ::memcpy (buf, &m_state.context, size);
1501        }
1502    }
1503    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1504    // Return the size of the register context even if NULL was passed in
1505    return size;
1506}
1507
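// Restore a register context captured by GetRegisterContext(): copy the buffer
// into the cached state and push the GPR, FPU/AVX and EXC sets back to the
// thread. Returns the number of bytes consumed, or zero if no buffer was given.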
1508nub_size_t
1509DNBArchImplI386::SetRegisterContext (const void *buf, nub_size_t buf_len)
1510{
1511    nub_size_t size = sizeof (m_state.context);
1512    if (buf == NULL || buf_len == 0)
1513        size = 0;
1514
1515    if (size)
1516    {
1517        if (size > buf_len)
1518            size = buf_len;
1519
1520        ::memcpy (&m_state.context, buf, size);
1521        kern_return_t kret;
1522        if ((kret = SetGPRState()) != KERN_SUCCESS)
1523            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %zu) error: GPR regs failed to write: %u", buf, buf_len, kret);
1524        if ((kret = SetFPUState()) != KERN_SUCCESS)
1525            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %zu) error: %s regs failed to write: %u", buf, buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
1526        if ((kret = SetEXCState()) != KERN_SUCCESS)
1527            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %zu) error: EXC regs failed to write: %u", buf, buf_len, kret);
1528    }
1529    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
1530    return size;
1531}
1532
1533
1534
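// Read the requested register set (or all sets) from the thread into the cached
// state; 'force' requests a fresh read even when a cached copy is available.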
1535kern_return_t
1536DNBArchImplI386::GetRegisterState(int set, bool force)
1537{
1538    switch (set)
1539    {
1540    case e_regSetALL:    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
1541    case e_regSetGPR:    return GetGPRState(force);
1542    case e_regSetFPU:    return GetFPUState(force);
1543    case e_regSetEXC:    return GetEXCState(force);
1544    default: break;
1545    }
1546    return KERN_INVALID_ARGUMENT;
1547}
1548
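// Write the requested register set (or all sets) from the cached state back to
// the thread, provided the cached copy for that set is valid.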
1549kern_return_t
1550DNBArchImplI386::SetRegisterState(int set)
1551{
1552    // Make sure we have a valid context to set.
1553    if (RegisterSetStateIsValid(set))
1554    {
1555        switch (set)
1556        {
1557        case e_regSetALL:    return SetGPRState() | SetFPUState() | SetEXCState();
1558        case e_regSetGPR:    return SetGPRState();
1559        case e_regSetFPU:    return SetFPUState();
1560        case e_regSetEXC:    return SetEXCState();
1561        default: break;
1562        }
1563    }
1564    return KERN_INVALID_ARGUMENT;
1565}
1566
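// Report whether the cached copy of the given register set currently holds
// valid data.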
1567bool
1568DNBArchImplI386::RegisterSetStateIsValid (int set) const
1569{
1570    return m_state.RegsAreValid(set);
1571}
1572
1573#endif    // #if defined (__i386__) || defined (__x86_64__)
1574