//===------------------------ UnwindRegistersSave.S ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

    .text

#if defined(__i386__)

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#   +                       +
#   +-----------------------+
#   + thread_state pointer  +
#   +-----------------------+
#   + return address        +
#   +-----------------------+   <-- SP
#   +                       +
#
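#
# For reference, the offsets assumed by the stores below (taken from the code
# itself, not from a header): eax at 0, ebx 4, ecx 8, edx 12, edi 16, esi 20,
# ebp 24, esp 28 and eip 40; the ss, eflags and segment-register slots in
# between are skipped.
#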
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  pushl %eax
  movl  8(%esp), %eax
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx
  movl  %edx, 28(%eax)  # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)  # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx
  movl  %edx, (%eax)  # store original eax
  popl  %eax
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__x86_64__)

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in rdi
#
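#
# A minimal caller sketch (illustrative only; it assumes the public libunwind
# API declared in libunwind.h, nothing defined in this file):
#
#   unw_context_t uc;
#   unw_cursor_t cursor;
#   unw_getcontext(&uc);           // capture registers as of this call site
#   unw_init_local(&cursor, &uc);  // begin unwinding from the captured state
#
# The stores below assume rax at offset 0, rbx 8, rcx 16, rdx 24, rdi 32,
# rsi 40, rbp 48, rsp 56, r8-r15 at 64-120 and rip at 128.
#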
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  movq  %rax,   (%rdi)
  movq  %rbx,  8(%rdi)
  movq  %rcx, 16(%rdi)
  movq  %rdx, 24(%rdi)
  movq  %rdi, 32(%rdi)
  movq  %rsi, 40(%rdi)
  movq  %rbp, 48(%rdi)
  movq  %rsp, 56(%rdi)
  addq  $8,   56(%rdi)
  movq  %r8,  64(%rdi)
  movq  %r9,  72(%rdi)
  movq  %r10, 80(%rdi)
  movq  %r11, 88(%rdi)
  movq  %r12, 96(%rdi)
  movq  %r13,104(%rdi)
  movq  %r14,112(%rdi)
  movq  %r15,120(%rdi)
  movq  (%rsp),%rsi
  movq  %rsi,128(%rdi) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__ppc__)

;
; extern int unw_getcontext(unw_context_t* thread_state)
;
; On entry:
;  thread_state pointer is in r3
;
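;
; For reference, the offsets assumed by the stores below (taken from the code
; itself): srr0 at 0 (the return address is stored there), r0-r31 at 8-132,
; cr at 136, ctr at 148, VRSave at 156, f0-f31 at 160-408 and the vector
; registers at 424 onwards (16 bytes each, saved via the macro further down).
;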
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  stw    r0,  8(r3)
  mflr  r0
  stw    r0,  0(r3)  ; store lr as srr0
  stw    r1, 12(r3)
  stw    r2, 16(r3)
  stw    r3, 20(r3)
  stw    r4, 24(r3)
  stw    r5, 28(r3)
  stw    r6, 32(r3)
  stw    r7, 36(r3)
  stw    r8, 40(r3)
  stw    r9, 44(r3)
  stw    r10, 48(r3)
  stw    r11, 52(r3)
  stw    r12, 56(r3)
  stw    r13, 60(r3)
  stw    r14, 64(r3)
  stw    r15, 68(r3)
  stw    r16, 72(r3)
  stw    r17, 76(r3)
  stw    r18, 80(r3)
  stw    r19, 84(r3)
  stw    r20, 88(r3)
  stw    r21, 92(r3)
  stw    r22, 96(r3)
  stw    r23,100(r3)
  stw    r24,104(r3)
  stw    r25,108(r3)
  stw    r26,112(r3)
  stw    r27,116(r3)
  stw    r28,120(r3)
  stw    r29,124(r3)
  stw    r30,128(r3)
  stw    r31,132(r3)

  ; save VRSave register
  mfspr  r0,256
  stw    r0,156(r3)
  ; save CR registers
  mfcr  r0
  stw    r0,136(r3)
  ; save CTR register
  mfctr  r0
  stw    r0,148(r3)

  ; save float registers
  stfd    f0, 160(r3)
  stfd    f1, 168(r3)
  stfd    f2, 176(r3)
  stfd    f3, 184(r3)
  stfd    f4, 192(r3)
  stfd    f5, 200(r3)
  stfd    f6, 208(r3)
  stfd    f7, 216(r3)
  stfd    f8, 224(r3)
  stfd    f9, 232(r3)
  stfd    f10,240(r3)
  stfd    f11,248(r3)
  stfd    f12,256(r3)
  stfd    f13,264(r3)
  stfd    f14,272(r3)
  stfd    f15,280(r3)
  stfd    f16,288(r3)
  stfd    f17,296(r3)
  stfd    f18,304(r3)
  stfd    f19,312(r3)
  stfd    f20,320(r3)
  stfd    f21,328(r3)
  stfd    f22,336(r3)
  stfd    f23,344(r3)
  stfd    f24,352(r3)
  stfd    f25,360(r3)
  stfd    f26,368(r3)
  stfd    f27,376(r3)
  stfd    f28,384(r3)
  stfd    f29,392(r3)
  stfd    f30,400(r3)
  stfd    f31,408(r3)


  ; save vector registers

  subi  r4,r1,16
  rlwinm  r4,r4,0,0,27  ; clear the low 4 bits
  ; r4 is now a 16-byte aligned pointer into the red zone

#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx  _vec,0,r4           @\
  lwz    r5, 0(r4)          @\
  stw    r5, _offset(r3)    @\
  lwz    r5, 4(r4)          @\
  stw    r5, _offset+4(r3)  @\
  lwz    r5, 8(r4)          @\
  stw    r5, _offset+8(r3)  @\
  lwz    r5, 12(r4)         @\
  stw    r5, _offset+12(r3)
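
; For reference, SAVE_VECTOR_UNALIGNED(v0, 424+0x000) expands to roughly:
;   stvx  v0,0,r4        ; dump v0 into the 16-byte aligned scratch slot
;   lwz   r5, 0(r4)      ; then copy it word by word into the (possibly
;   stw   r5, 424(r3)    ; unaligned) context structure at offset 424
;   lwz   r5, 4(r4)
;   stw   r5, 428(r3)
;   ... and so on for the remaining two words.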

  SAVE_VECTOR_UNALIGNED( v0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( v1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( v2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( v3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( v4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( v5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( v6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( v7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( v8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( v9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(v10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(v11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(v12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(v13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(v14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(v15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(v16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(v17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(v18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(v19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(v20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(v21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(v22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(v23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(v24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(v25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(v26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(v27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(v28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(v29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(v30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(v31, 424+0x1F0)

  li  r3, 0    ; return UNW_ESUCCESS
  blr


#elif defined(__arm64__)

;
; extern int unw_getcontext(unw_context_t* thread_state)
;
; On entry:
;  thread_state pointer is in x0
;
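;
; For reference, the offsets assumed by the stores below (taken from the code
; itself): x0-x28 at 0x000-0x0E0, fp at 0x0E8, lr at 0x0F0, sp at 0x0F8,
; pc at 0x100 (the return address is stored there), cpsr at 0x108 (skipped)
; and d0-d31 at 0x110-0x208.
;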
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  stp    x0, x1,  [x0, #0x000]
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,fp,  [x0, #0x0E0]
  str    lr,      [x0, #0x0F0]
  mov    x1,sp
  str    x1,      [x0, #0x0F8]
  str    lr,      [x0, #0x100]    ; store return address as pc
  ; skip cpsr
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
  mov    x0, #0      ; return UNW_ESUCCESS
  ret

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif

@
@ extern int unw_getcontext(unw_context_t* thread_state)
@
@ On entry:
@  thread_state pointer is in r0
@
@ Per EHABI #4.7 this only saves the core integer registers.
@ EHABI #7.4.5 notes that in general all VRS registers should be restored;
@ however, this is very hard to do for VFP registers because the library does
@ not know how many registers the architecture implements.
@ Instead, VFP registers are demand saved by logic external to unw_getcontext.
@
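@ For reference, the core register offsets assumed by the stores below (taken
@ from the code itself): r0-r12 at 0-48, sp at 52, lr at 56 and the return
@ address, stored as pc, at 60.  The demand-save entry points referred to
@ above are the saveVFPWithFSTMD, saveVFPWithFSTMX, saveVFPv3, saveiWMMX and
@ saveiWMMXControl routines defined later in this file.
@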
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM)
  @ stm with writeback advances r0 by 32 bytes past the stored r0-r7, so the
  @ remaining slots are addressed relative to the updated r0.
  stm r0!, {r0-r7}
  @ High registers have to be moved into low registers before they can be
  @ stored with the 16-bit encodings available here.
  mov r2, r8
  mov r3, r9
  stm r0!, {r2-r3}
  mov r2, r10
  mov r3, r11
  stm r0!, {r2-r3}
  @ r12 is the intra-procedure-call scratch register; its slot is skipped.
  mov r2, sp
  mov r3, lr
  str r2, [r0, #4]   @ sp  (offset 52 from the start of the context)
  str r3, [r0, #8]   @ lr  (offset 56)
  str r3, [r0, #12]  @ store return address as pc  (offset 60)
#else
  @ 32-bit Thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
#endif
#if __ARM_ARCH_ISA_THUMB == 1
  @ Thumb-1 does not have a register-zeroing instruction that leaves cpsr
  @ untouched. It is safe to use movs here, though, because we are about to
  @ return and cpsr is not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3-d16
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPy)
  vstmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3-d16
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPy)
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPy)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to compile the whole library that
  @ way (because we do not want the compiler to generate instructions that
  @ access those registers), but this code is only reached if the personality
  @ routine needs these registers, and needing them implies they are actually
  @ available on the target, so it is safe to execute.
  @ The .fpu directive above lets the assembler accept the d16-d31 mnemonics
  @ without changing how the rest of the library is built.
  vstmia r0, {d16-d31}
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPy)
#if (!defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_6SM__)) || defined(__ARM_WMMX)
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
#if (!defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_6SM__)) || defined(__ARM_WMMX)
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
#endif
  JMP(lr)

#endif