UnwindRegistersRestore.S revision 7c61d80c68ef9af39fbc49ef532c2252fa719ac9
//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if __i386__
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
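#
# Register offsets assumed below, as implied by the loads that follow
# (matching the i386 thread-state layout):
#   0 eax, 4 ebx, 8 ecx, 12 edx, 16 edi, 20 esi, 24 ebp, 28 esp,
#   32 ss, 36 eflags, 40 eip, then cs/ds/es/fs/gs
#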
  movl   4(%esp), %eax
  # stage eax and the return address at the new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # The return address and eax now sit where the new stack will be.
  # eax holds the thread_state pointer until the last load, and the old
  # stack becomes unreachable once esp is switched, so both values are
  # recovered below with pop and ret.
  # restore all other registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret        # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif __x86_64__

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
# On entry, thread_state pointer is in rdi
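#
# Register offsets assumed below, as implied by the loads that follow:
#   0 rax, 8 rbx, 16 rcx, 24 rdx, 32 rdi, 40 rsi, 48 rbp, 56 rsp,
#   64-120 r8-r15, 128 rip
#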

  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
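  # rdi is the base pointer for every load here, and the old stack is
  # unreachable once rsp is switched, so the new rdi and rip are staged
  # on the new stack and recovered at the end with pop and ret.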
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  ret            # rip was saved here

#elif __ppc__

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
;
; void libunwind::Registers_ppc::jumpto()
;
; On entry:
;  thread_state pointer is in r3
;

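  ; Register offsets assumed below, as implied by the loads that follow:
  ;   0 srr0, 8+4*n for rN (so 8 r0, 12 r1/sp, ...), 136 cr, 148 ctr,
  ;   156 vrsave, 160+8*n for fN, 424+16*n for vN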
  ; restore integral registers
  ; skip r0 for now
  ; skip r1 for now
  lwz     r2, 16(r3)
  ; skip r3 for now
  ; skip r4 for now
  ; skip r5 for now
  lwz     r6, 32(r3)
  lwz     r7, 36(r3)
  lwz     r8, 40(r3)
  lwz     r9, 44(r3)
  lwz    r10, 48(r3)
  lwz    r11, 52(r3)
  lwz    r12, 56(r3)
  lwz    r13, 60(r3)
  lwz    r14, 64(r3)
  lwz    r15, 68(r3)
  lwz    r16, 72(r3)
  lwz    r17, 76(r3)
  lwz    r18, 80(r3)
  lwz    r19, 84(r3)
  lwz    r20, 88(r3)
  lwz    r21, 92(r3)
  lwz    r22, 96(r3)
  lwz    r23,100(r3)
  lwz    r24,104(r3)
  lwz    r25,108(r3)
  lwz    r26,112(r3)
  lwz    r27,116(r3)
  lwz    r28,120(r3)
  lwz    r29,124(r3)
  lwz    r30,128(r3)
  lwz    r31,132(r3)

  ; restore float registers
  lfd    f0, 160(r3)
  lfd    f1, 168(r3)
  lfd    f2, 176(r3)
  lfd    f3, 184(r3)
  lfd    f4, 192(r3)
  lfd    f5, 200(r3)
  lfd    f6, 208(r3)
  lfd    f7, 216(r3)
  lfd    f8, 224(r3)
  lfd    f9, 232(r3)
  lfd    f10,240(r3)
  lfd    f11,248(r3)
  lfd    f12,256(r3)
  lfd    f13,264(r3)
  lfd    f14,272(r3)
  lfd    f15,280(r3)
  lfd    f16,288(r3)
  lfd    f17,296(r3)
  lfd    f18,304(r3)
  lfd    f19,312(r3)
  lfd    f20,320(r3)
  lfd    f21,328(r3)
  lfd    f22,336(r3)
  lfd    f23,344(r3)
  lfd    f24,352(r3)
  lfd    f25,360(r3)
  lfd    f26,368(r3)
  lfd    f27,376(r3)
  lfd    f28,384(r3)
  lfd    f29,392(r3)
  lfd    f30,400(r3)
  lfd    f31,408(r3)

  ; restore vector registers if any are in use
  lwz    r5,156(r3)  ; test VRsave
  cmpwi  r5,0
  beq    Lnovec

  subi  r4,r1,16
  rlwinm  r4,r4,0,0,27  ; clear the low 4 bits
  ; r4 is now a 16-byte aligned pointer into the red zone
  ; the _vectorRegisters may not be 16-byte aligned, so copy each one via a red-zone temp buffer
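; The two macro flavors below split the 32-bit VRsave mask: andis. tests
; the upper halfword bit for v0-v15 and andi. tests the lower halfword
; bit for v16-v31, so a vector register is loaded only when its VRsave
; bit is set.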
#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis.  r0,r5,(1<<(15-_index))  @\
  beq    Ldone  ## _index     @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3)  @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone  ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi.  r0,r5,(1<<(31-_index))  @\
  beq    Ldone  ## _index    @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3)  @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone  ## _index:


  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
  lwz    r0, 136(r3) ; __cr
  mtocrf  255,r0     ; mask 255 writes all eight CR fields
  lwz    r0, 148(r3) ; __ctr
  mtctr  r0          ; note: clobbered just below; ctr must carry the resume pc
  lwz    r0, 0(r3)   ; __srr0 (resume pc)
  mtctr  r0
  lwz    r0, 8(r3)   ; do r0 now
  lwz    r5,28(r3)   ; do r5 now
  lwz    r4,24(r3)   ; do r4 now
  lwz    r1,12(r3)   ; do sp now
  lwz    r3,20(r3)   ; do r3 last
  bctr

#elif __arm64__

;
; void libunwind::Registers_arm64::jumpto()
;
; On entry:
;  thread_state pointer is in x0
;
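;  Register offsets assumed below, as implied by the loads that follow:
;    0x000-0x0E0 x0-x28, 0x0E8 fp, 0x0F0 presumably lr (not reloaded;
;    the pc is placed in lr instead), 0x0F8 sp, 0x100 pc,
;    0x110-0x208 d0-d31
;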
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
  ; skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  ldp    x16,x17, [x0, #0x080]
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,fp,  [x0, #0x0E0]
  ldr    lr,      [x0, #0x100]  ; restore pc into lr
  ldr    x1,      [x0, #0x0F8]
  mov    sp,x1          ; restore sp
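  ; (sp cannot be the destination of an ldr, so it is staged through x1;
  ;  x1 itself is reloaded at the end)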

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  ldp    x0, x1,  [x0, #0x000]  ; restore x0,x1
  ret    lr            ; jump to pc

#endif