quick_entrypoints_arm64.S revision 4fc046e78efbc98541388cdda986b5d8a2b951ad
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "asm_support_arm64.S"
18
19#include "arch/quick_alloc_entrypoints.S"
20
21
22    /*
23     * Macro that sets up the callee save frame to conform with
24     * Runtime::CreateCalleeSaveMethod(kSaveAll)
25     */
26.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
27    adrp x9, :got:_ZN3art7Runtime9instance_E
28    ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
29
30    // Our registers aren't intermixed - just spill in order.
    ldr x9, [x9]  // x9 = art::Runtime::instance_ (the Runtime*).

    // x9 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kSaveAll].
    ldr x9, [x9, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET]
35
36    sub sp, sp, #368
37    .cfi_adjust_cfa_offset 368
38
39    // Ugly compile-time check, but we only have the preprocessor.
40#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 368)
41#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
42#endif
43
44    // FP args
    stp d0, d1,   [sp, #8]
46    stp d2, d3, [sp, #24]
47    stp d4, d5, [sp, #40]
48    stp d6, d7, [sp, #56]
49
50    // FP callee-saves
51    stp d8, d9,   [sp, #72]
52    stp d10, d11, [sp, #88]
53    stp d12, d13, [sp, #104]
54    stp d14, d15, [sp, #120]
55
56    stp d16, d17,   [sp, #136]
57    stp d18, d19,   [sp, #152]
58    stp d20, d21,   [sp, #168]
59    stp d22, d23,   [sp, #184]
60    stp d24, d25,   [sp, #200]
61    stp d26, d27,   [sp, #216]
62    stp d28, d29,   [sp, #232]
63    stp d30, d31,   [sp, #248]
64
65
66    // Callee saved.
67    stp xSELF, x19, [sp, #264]
68    .cfi_rel_offset x18, 264
69    .cfi_rel_offset x19, 272
70
71    stp x20, x21, [sp, #280]
72    .cfi_rel_offset x20, 280
73    .cfi_rel_offset x21, 288
74
75    stp x22, x23, [sp, #296]
76    .cfi_rel_offset x22, 296
77    .cfi_rel_offset x23, 304
78
79    stp x24, x25, [sp, #312]
80    .cfi_rel_offset x24, 312
81    .cfi_rel_offset x25, 320
82
83    stp x26, x27, [sp, #328]
84    .cfi_rel_offset x26, 328
85    .cfi_rel_offset x27, 336
86
87    stp x28, xFP, [sp, #344]    // Save FP.
88    .cfi_rel_offset x28, 344
89    .cfi_rel_offset x29, 352
90
91    str xLR, [sp, #360]
92    .cfi_rel_offset x30, 360
93
    // Place the ArtMethod* for this frame at the bottom of the stack.
    str x9, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAll]
96
97.endm
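
    /*
     * For reference, the 368-byte frame built by the macro above is laid out as:
     *   [sp, #0]         ArtMethod* for the kSaveAll callee-save method
     *   [sp, #8-#263]    d0 - d31
     *   [sp, #264-#359]  x18 (xSELF) and x19 - x29 (FP)
     *   [sp, #360]       x30 (LR)
     */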
98
99    /*
100     * Macro that sets up the callee save frame to conform with
101     * Runtime::CreateCalleeSaveMethod(kRefsOnly).
102     */
103// WIP.
104.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
105    adrp x9, :got:_ZN3art7Runtime9instance_E
106    ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
107
108    // Our registers aren't intermixed - just spill in order.
    ldr x9, [x9]  // x9 = art::Runtime::instance_ (the Runtime*).

    // x9 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kRefsOnly].
    ldr x9, [x9, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET]
113
114    sub sp, sp, #176
115    .cfi_adjust_cfa_offset 176
116
117    // Ugly compile-time check, but we only have the preprocessor.
118#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 176)
119#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
120#endif
121
122    // FP callee-saves
123    stp d8, d9,   [sp, #8]
124    stp d10, d11, [sp, #24]
125    stp d12, d13, [sp, #40]
126    stp d14, d15, [sp, #56]
127
128    // Callee saved.
129    stp xSELF, x19, [sp, #72]
130    .cfi_rel_offset x18, 72
131    .cfi_rel_offset x19, 80
132
133    stp x20, x21, [sp, #88]
134    .cfi_rel_offset x20, 88
135    .cfi_rel_offset x21, 96
136
137    stp x22, x23, [sp, #104]
138    .cfi_rel_offset x22, 104
139    .cfi_rel_offset x23, 112
140
141    stp x24, x25, [sp, #120]
142    .cfi_rel_offset x24, 120
143    .cfi_rel_offset x25, 128
144
145    stp x26, x27, [sp, #136]
146    .cfi_rel_offset x26, 136
147    .cfi_rel_offset x27, 144
148
149    stp x28, xFP, [sp, #152]    // Save FP.
150    .cfi_rel_offset x28, 152
151    .cfi_rel_offset x29, 160
152
153    str xLR, [sp, #168]
154    .cfi_rel_offset x30, 168
155
    // Place the ArtMethod* for this frame at the bottom of the stack.
    str x9, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly]
158.endm
159
160.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
161    // FP callee saves
162    ldp d8, d9,   [sp, #8]
163    ldp d10, d11, [sp, #24]
164    ldp d12, d13, [sp, #40]
165    ldp d14, d15, [sp, #56]
166
167    // Callee saved.
168    ldp xSELF, x19, [sp, #72]
169    .cfi_restore x18
170    .cfi_restore x19
171
172    ldp x20, x21, [sp, #88]
173    .cfi_restore x20
174    .cfi_restore x21
175
176    ldp x22, x23, [sp, #104]
177    .cfi_restore x22
178    .cfi_restore x23
179
180    ldp x24, x25, [sp, #120]
181    .cfi_restore x24
182    .cfi_restore x25
183
184    ldp x26, x27, [sp, #136]
185    .cfi_restore x26
186    .cfi_restore x27
187
    ldp x28, xFP, [sp, #152]    // Restore FP.
189    .cfi_restore x28
190    .cfi_restore x29
191
192    ldr xLR, [sp, #168]
193    .cfi_restore x30
194
195    add sp, sp, #176
196    .cfi_adjust_cfa_offset -176
197.endm
198
199.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
200    brk 0
201.endm
202
203
204.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
205    sub sp, sp, #304
206    .cfi_adjust_cfa_offset 304
207
208    // Ugly compile-time check, but we only have the preprocessor.
209#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 304)
210#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
211#endif
212
213    stp d0, d1,   [sp, #16]
214    stp d2, d3,   [sp, #32]
215    stp d4, d5,   [sp, #48]
216    stp d6, d7,   [sp, #64]
217    stp d8, d9,   [sp, #80]
218    stp d10, d11, [sp, #96]
219    stp d12, d13, [sp, #112]
220    stp d14, d15, [sp, #128]
221
222    stp x1,  x2, [sp, #144]
223    .cfi_rel_offset x1, 144
224    .cfi_rel_offset x2, 152
225
226    stp x3,  x4, [sp, #160]
227    .cfi_rel_offset x3, 160
228    .cfi_rel_offset x4, 168
229
230    stp x5,  x6, [sp, #176]
231    .cfi_rel_offset x5, 176
232    .cfi_rel_offset x6, 184
233
234    stp x7,  xSELF, [sp, #192]
235    .cfi_rel_offset x7, 192
236    .cfi_rel_offset x18, 200
237
238    stp x19, x20, [sp, #208]
239    .cfi_rel_offset x19, 208
240    .cfi_rel_offset x20, 216
241
242    stp x21, x22, [sp, #224]
243    .cfi_rel_offset x21, 224
244    .cfi_rel_offset x22, 232
245
246    stp x23, x24, [sp, #240]
247    .cfi_rel_offset x23, 240
248    .cfi_rel_offset x24, 248
249
250    stp x25, x26, [sp, #256]
251    .cfi_rel_offset x25, 256
252    .cfi_rel_offset x26, 264
253
254    stp x27, x28, [sp, #272]
255    .cfi_rel_offset x27, 272
256    .cfi_rel_offset x28, 280
257
258    stp xFP, xLR, [sp, #288]
259    .cfi_rel_offset x29, 288
260    .cfi_rel_offset x30, 296
261.endm
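
    /*
     * The 304-byte frame built above is laid out as:
     *   [sp, #0]         ArtMethod* (stored separately by the users of this macro)
     *   [sp, #16-#143]   d0 - d15
     *   [sp, #144-#207]  x1 - x7 and x18 (xSELF)
     *   [sp, #208-#303]  x19 - x30
     */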
262
263    /*
264     * Macro that sets up the callee save frame to conform with
265     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
266     *
267     * TODO This is probably too conservative - saving FP & LR.
268     */
269.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
270    adrp x9, :got:_ZN3art7Runtime9instance_E
271    ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
272
273    // Our registers aren't intermixed - just spill in order.
    ldr x9, [x9]  // x9 = art::Runtime::instance_ (the Runtime*).

    // x9 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kRefsAndArgs].
    ldr x9, [x9, RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
278
279    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
280
281    str x9, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
282.endm
283
284.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
285
286    ldp d0, d1,   [sp, #16]
287    ldp d2, d3,   [sp, #32]
288    ldp d4, d5,   [sp, #48]
289    ldp d6, d7,   [sp, #64]
290    ldp d8, d9,   [sp, #80]
291    ldp d10, d11, [sp, #96]
292    ldp d12, d13, [sp, #112]
293    ldp d14, d15, [sp, #128]
294
295    // args.
296    ldp x1,  x2, [sp, #144]
297    .cfi_restore x1
298    .cfi_restore x2
299
300    ldp x3,  x4, [sp, #160]
301    .cfi_restore x3
302    .cfi_restore x4
303
304    ldp x5,  x6, [sp, #176]
305    .cfi_restore x5
306    .cfi_restore x6
307
308    ldp x7,  xSELF, [sp, #192]
309    .cfi_restore x7
310    .cfi_restore x18
311
312    ldp x19, x20, [sp, #208]
313    .cfi_restore x19
314    .cfi_restore x20
315
316    ldp x21, x22, [sp, #224]
317    .cfi_restore x21
318    .cfi_restore x22
319
320    ldp x23, x24, [sp, #240]
321    .cfi_restore x23
322    .cfi_restore x24
323
324    ldp x25, x26, [sp, #256]
325    .cfi_restore x25
326    .cfi_restore x26
327
328    ldp x27, x28, [sp, #272]
329    .cfi_restore x27
330    .cfi_restore x28
331
332    ldp xFP, xLR, [sp, #288]
333    .cfi_restore x29
334    .cfi_restore x30
335
336    add sp, sp, #304
337    .cfi_adjust_cfa_offset -304
338.endm
339
340.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME_NO_D0
341
342    ldr d1,   [sp, #24]
343    ldp d2, d3,   [sp, #32]
344    ldp d4, d5,   [sp, #48]
345    ldp d6, d7,   [sp, #64]
346    ldp d8, d9,   [sp, #80]
347    ldp d10, d11, [sp, #96]
348    ldp d12, d13, [sp, #112]
349    ldp d14, d15, [sp, #128]
350
351    // args.
352    ldp x1,  x2, [sp, #144]
353    .cfi_restore x1
354    .cfi_restore x2
355
356    ldp x3,  x4, [sp, #160]
357    .cfi_restore x3
358    .cfi_restore x4
359
360    ldp x5,  x6, [sp, #176]
361    .cfi_restore x5
362    .cfi_restore x6
363
364    ldp x7,  xSELF, [sp, #192]
365    .cfi_restore x7
366    .cfi_restore x18
367
368    ldp x19, x20, [sp, #208]
369    .cfi_restore x19
370    .cfi_restore x20
371
372    ldp x21, x22, [sp, #224]
373    .cfi_restore x21
374    .cfi_restore x22
375
376    ldp x23, x24, [sp, #240]
377    .cfi_restore x23
378    .cfi_restore x24
379
380    ldp x25, x26, [sp, #256]
381    .cfi_restore x25
382    .cfi_restore x26
383
384    ldp x27, x28, [sp, #272]
385    .cfi_restore x27
386    .cfi_restore x28
387
388    ldp xFP, xLR, [sp, #288]
389    .cfi_restore x29
390    .cfi_restore x30
391
392    add sp, sp, #304
393    .cfi_adjust_cfa_offset -304
394.endm
395
396.macro RETURN_IF_RESULT_IS_ZERO
397    cbnz x0, 1f                // result non-zero branch over
398    ret                        // return
3991:
400.endm
401
402.macro RETURN_IF_RESULT_IS_NON_ZERO
403    cbz x0, 1f                 // result zero branch over
404    ret                        // return
4051:
406.endm
407
408    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
410     * exception is Thread::Current()->exception_
411     */
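    /*
     * Sketch of the C++ side being tail-called (the exact parameter types are an
     * assumption; the assembly only guarantees x0 = Thread* and x1 = SP):
     *   extern "C" void artDeliverPendingExceptionFromCode(Thread* self, mirror::ArtMethod** sp);
     */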
412.macro DELIVER_PENDING_EXCEPTION
413    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
414    mov x0, xSELF
415    mov x1, sp
416
417    // Point of no return.
418    b artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*, SP)
419    brk 0  // Unreached
420.endm
421
422.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
423    ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET]   // Get exception field.
424    cbnz \reg, 1f
425    ret
4261:
427    DELIVER_PENDING_EXCEPTION
428.endm
429
430.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
431    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x9
432.endm
433
434// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
435.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
436    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
437.endm
438
439.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
440    cbnz w0, 1f                // result non-zero branch over
441    ret                        // return
4421:
443    DELIVER_PENDING_EXCEPTION
444.endm
445
446.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
447    .extern \cxx_name
448ENTRY \c_name
449    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
450    mov x0, xSELF                        // pass Thread::Current
451    mov x1, sp                        // pass SP
452    b   \cxx_name                     // \cxx_name(Thread*, SP)
453END \c_name
454.endm
455
456.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
457    .extern \cxx_name
458ENTRY \c_name
459    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context.
460    mov x1, xSELF                       // pass Thread::Current.
461    mov x2, sp                        // pass SP.
462    b   \cxx_name                     // \cxx_name(arg, Thread*, SP).
463    brk 0
464END \c_name
465.endm
466
467.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
468    .extern \cxx_name
469ENTRY \c_name
470    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
471    mov x2, xSELF                       // pass Thread::Current
472    mov x3, sp                        // pass SP
473    b   \cxx_name                     // \cxx_name(arg1, arg2, Thread*, SP)
474    brk 0
475END \c_name
476.endm
477
478    /*
479     * Called by managed code, saves callee saves and then calls artThrowException
480     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
481     */
482ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
483
484    /*
485     * Called by managed code to create and deliver a NullPointerException.
486     */
487NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
488
489    /*
490     * Called by managed code to create and deliver an ArithmeticException.
491     */
492NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
493
494    /*
495     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
496     * index, arg2 holds limit.
497     */
498TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
499
500    /*
501     * Called by managed code to create and deliver a StackOverflowError.
502     */
503NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
504
505    /*
506     * Called by managed code to create and deliver a NoSuchMethodError.
507     */
508ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
509
510    /*
511     * TODO arm64 specifics need to be fleshed out.
512     * All generated callsites for interface invokes and invocation slow paths will load arguments
513     * as usual - except instead of loading x0 with the target Method*, x0 will contain
514     * the method_idx.  This wrapper will save x1-x3, load the caller's Method*, align the
515     * stack and call the appropriate C helper.
     * NOTE: "this" is the first visible argument of the target, and so can be found in x1.
517     *
518     * The helper will attempt to locate the target and return a result in x0 consisting
519     * of the target Method* in x0 and method->code_ in x1.
520     *
521     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
522     * thread and we branch to another stub to deliver it.
523     *
524     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
525     * pointing back to the original caller.
526     */
527.macro INVOKE_TRAMPOLINE c_name, cxx_name
528    .extern \cxx_name
529ENTRY \c_name
530    brk 0
531END \c_name
532.endm
533
534INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
535INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
536
537INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
538INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
539INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
540INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
541
542
543.macro INVOKE_STUB_CREATE_FRAME
544
545SAVE_SIZE=5*8   // x4, x5, SP, LR & FP saved.
546SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
547
548    mov x9, sp                          // Save stack pointer.
549    .cfi_register sp,x9
550
551    add x10, x2, # SAVE_SIZE_AND_METHOD // calculate size of frame.
552    sub x10, sp, x10                    // Calculate SP position - saves + ArtMethod* +  args
553    and x10, x10, # ~0xf                // Enforce 16 byte stack alignment.
554    mov sp, x10                         // Set new SP.
555
556    sub x10, x9, #SAVE_SIZE             // Calculate new FP (later). Done here as we must move SP
557    .cfi_def_cfa_register x10           // before this.
558    .cfi_adjust_cfa_offset SAVE_SIZE
559
560    str x9, [x10, #32]                  // Save old stack pointer.
561    .cfi_rel_offset sp, 32
562
563    stp x4, x5, [x10, #16]              // Save result and shorty addresses.
564    .cfi_rel_offset x4, 16
565    .cfi_rel_offset x5, 24
566
567    stp xFP, xLR, [x10]                 // Store LR & FP.
568    .cfi_rel_offset x29, 0
569    .cfi_rel_offset x30, 8
570
571    mov xFP, x10                        // Use xFP now, as it's callee-saved.
572    .cfi_def_cfa_register x29
573    mov xSELF, x3                       // Move thread pointer into SELF register.
574
575    // Copy arguments into stack frame.
576    // Use simple copy routine for now.
577    // 4 bytes per slot.
578    // X1 - source address
579    // W2 - args length
580    // X9 - destination address.
581    // W10 - temporary
    add x9, sp, #8     // Destination address is bottom of stack + 8 (skip the null ArtMethod* slot).
583
584    // Use \@ to differentiate between macro invocations.
585.LcopyParams\@:
586    cmp w2, #0
587    beq .LendCopyParams\@
588    sub w2, w2, #4      // Need 65536 bytes of range.
589    ldr w10, [x1, x2]
590    str w10, [x9, x2]
591
592    b .LcopyParams\@
593
594.LendCopyParams\@:
595
596    // Store NULL into Method* at bottom of frame.
597    str xzr, [sp]
598
599.endm
600
601.macro INVOKE_STUB_CALL_AND_RETURN
602
    // Load the method's quick code entry point.
    ldr x9, [x0, #METHOD_QUICK_CODE_OFFSET]
605    // Branch to method.
606    blr x9
607
608    // Restore return value address and shorty address.
609    ldp x4,x5, [xFP, #16]
610    .cfi_restore x4
611    .cfi_restore x5
612
613    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
614    ldrb w10, [x5]
615
616    // Don't set anything for a void type.
617    cmp w10, #'V'
618    beq .Lexit_art_quick_invoke_stub\@
619
620    cmp w10, #'D'
621    bne .Lreturn_is_float\@
622    str d0, [x4]
623    b .Lexit_art_quick_invoke_stub\@
624
625.Lreturn_is_float\@:
626    cmp w10, #'F'
627    bne .Lreturn_is_int\@
628    str s0, [x4]
629    b .Lexit_art_quick_invoke_stub\@
630
631    // Just store x0. Doesn't matter if it is 64 or 32 bits.
632.Lreturn_is_int\@:
633    str x0, [x4]
634
635.Lexit_art_quick_invoke_stub\@:
636    ldr x2, [x29, #32]   // Restore stack pointer.
637    mov sp, x2
638    .cfi_restore sp
639
640    ldp x29, x30, [x29]    // Restore old frame pointer and link register.
641    .cfi_restore x29
642    .cfi_restore x30
643
644    ret
645
646.endm
647
648
649/*
650 *  extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
651 *                                       uint32_t  *args,     x1
652 *                                       uint32_t argsize,    w2
653 *                                       Thread *self,        x3
654 *                                       JValue *result,      x4
655 *                                       char   *shorty);     x5
656 *  +----------------------+
657 *  |                      |
658 *  |  C/C++ frame         |
659 *  |       LR''           |
660 *  |       FP''           | <- SP'
661 *  +----------------------+
662 *  +----------------------+
663 *  |        SP'           |
664 *  |        X5            |
665 *  |        X4            |        Saved registers
666 *  |        LR'           |
667 *  |        FP'           | <- FP
668 *  +----------------------+
669 *  | uint32_t out[n-1]    |
670 *  |    :      :          |        Outs
671 *  | uint32_t out[0]      |
672 *  | ArtMethod* NULL      | <- SP
673 *  +----------------------+
674 *
675 * Outgoing registers:
676 *  x0    - Method*
677 *  x1-x7 - integer parameters.
678 *  d0-d7 - Floating point parameters.
679 *  xSELF = self
680 *  SP = & of ArtMethod*
681 *  x1 = "this" pointer.
682 *
683 */
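/*
 * Illustrative example of the shorty-driven register filling below, assuming the
 * usual ART shorty encoding (return type first, one character per argument, all
 * reference types written as 'L'):
 *   double m(int a, long b, float c, Object d)   ->   shorty "DIJFL"
 *   w1 <- "this", w2 <- a, x3 <- b, s0 <- c, w4 <- d,
 *   and on return the leading 'D' routes d0 into *result.
 */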
684ENTRY art_quick_invoke_stub
    // Spill registers as per AAPCS64 calling convention.
686    INVOKE_STUB_CREATE_FRAME
687
688    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
689    // Parse the passed shorty to determine which register to load.
690    // Load addresses for routines that load WXSD registers.
691    adr  x11, .LstoreW2
692    adr  x12, .LstoreX2
693    adr  x13, .LstoreS0
694    adr  x14, .LstoreD0
695
696    // Initialize routine offsets to 0 for integers and floats.
697    // x8 for integers, x15 for floating point.
698    mov x8, #0
699    mov x15, #0
700
701    add x10, x5, #1         // Load shorty address, plus one to skip return value.
702    ldr w1, [x9],#4         // Load "this" parameter, and increment arg pointer.
703
704    // Loop to fill registers.
705.LfillRegisters:
706    ldrb w17, [x10], #1       // Load next character in signature, and increment.
707    cbz w17, .LcallFunction   // Exit at end of signature. Shorty 0 terminated.
708
709    cmp  w17, #'F' // is this a float?
710    bne .LisDouble
711
712    cmp x15, # 8*12         // Skip this load if all registers full.
713    beq .Ladvance4
714
715    add x17, x13, x15       // Calculate subroutine to jump to.
716    br  x17
717
718.LisDouble:
719    cmp w17, #'D'           // is this a double?
720    bne .LisLong
721
722    cmp x15, # 8*12         // Skip this load if all registers full.
723    beq .Ladvance8
724
725    add x17, x14, x15       // Calculate subroutine to jump to.
726    br x17
727
728.LisLong:
729    cmp w17, #'J'           // is this a long?
730    bne .LisOther
731
732    cmp x8, # 6*12          // Skip this load if all registers full.
733    beq .Ladvance8
734
735    add x17, x12, x8        // Calculate subroutine to jump to.
736    br x17
737
738.LisOther:                  // Everything else takes one vReg.
739    cmp x8, # 6*12          // Skip this load if all registers full.
740    beq .Ladvance4
741
742    add x17, x11, x8        // Calculate subroutine to jump to.
743    br x17
744
745.Ladvance4:
746    add x9, x9, #4
747    b .LfillRegisters
748
749.Ladvance8:
750    add x9, x9, #8
751    b .LfillRegisters
752
// Macro for loading a parameter into a register.
//  counter - the register holding the current offset into the jump tables below.
//  size - the size of the load - 4 or 8 bytes.
//  register - the name of the register to be loaded.
//  return - the label to branch back to once the parameter has been loaded.
// Each expansion is three 4-byte instructions, which is why the counter advances by 12
// per consumed argument.
757.macro LOADREG counter size register return
758    ldr \register , [x9], #\size
759    add \counter, \counter, 12
760    b \return
761.endm
762
763// Store ints.
764.LstoreW2:
765    LOADREG x8 4 w2 .LfillRegisters
766    LOADREG x8 4 w3 .LfillRegisters
767    LOADREG x8 4 w4 .LfillRegisters
768    LOADREG x8 4 w5 .LfillRegisters
769    LOADREG x8 4 w6 .LfillRegisters
770    LOADREG x8 4 w7 .LfillRegisters
771
772// Store longs.
773.LstoreX2:
774    LOADREG x8 8 x2 .LfillRegisters
775    LOADREG x8 8 x3 .LfillRegisters
776    LOADREG x8 8 x4 .LfillRegisters
777    LOADREG x8 8 x5 .LfillRegisters
778    LOADREG x8 8 x6 .LfillRegisters
779    LOADREG x8 8 x7 .LfillRegisters
780
781// Store singles.
782.LstoreS0:
783    LOADREG x15 4 s0 .LfillRegisters
784    LOADREG x15 4 s1 .LfillRegisters
785    LOADREG x15 4 s2 .LfillRegisters
786    LOADREG x15 4 s3 .LfillRegisters
787    LOADREG x15 4 s4 .LfillRegisters
788    LOADREG x15 4 s5 .LfillRegisters
789    LOADREG x15 4 s6 .LfillRegisters
790    LOADREG x15 4 s7 .LfillRegisters
791
792// Store doubles.
793.LstoreD0:
794    LOADREG x15 8 d0 .LfillRegisters
795    LOADREG x15 8 d1 .LfillRegisters
796    LOADREG x15 8 d2 .LfillRegisters
797    LOADREG x15 8 d3 .LfillRegisters
798    LOADREG x15 8 d4 .LfillRegisters
799    LOADREG x15 8 d5 .LfillRegisters
800    LOADREG x15 8 d6 .LfillRegisters
801    LOADREG x15 8 d7 .LfillRegisters
802
803
804.LcallFunction:
805
806    INVOKE_STUB_CALL_AND_RETURN
807
808END art_quick_invoke_stub
809
810/*  extern"C"
811 *     void art_quick_invoke_static_stub(ArtMethod *method,   x0
812 *                                       uint32_t  *args,     x1
813 *                                       uint32_t argsize,    w2
814 *                                       Thread *self,        x3
815 *                                       JValue *result,      x4
816 *                                       char   *shorty);     x5
817 */
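/*
 * Same register-filling scheme as art_quick_invoke_stub above, except that there is
 * no implicit "this" argument, so integer arguments start at w1/x1 instead of w2/x2.
 */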
818ENTRY art_quick_invoke_static_stub
    // Spill registers as per AAPCS64 calling convention.
820    INVOKE_STUB_CREATE_FRAME
821
822    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
823    // Parse the passed shorty to determine which register to load.
824    // Load addresses for routines that load WXSD registers.
825    adr  x11, .LstoreW1_2
826    adr  x12, .LstoreX1_2
827    adr  x13, .LstoreS0_2
828    adr  x14, .LstoreD0_2
829
830    // Initialize routine offsets to 0 for integers and floats.
831    // x8 for integers, x15 for floating point.
832    mov x8, #0
833    mov x15, #0
834
835    add x10, x5, #1     // Load shorty address, plus one to skip return value.
836
837    // Loop to fill registers.
838.LfillRegisters2:
839    ldrb w17, [x10], #1         // Load next character in signature, and increment.
840    cbz w17, .LcallFunction2    // Exit at end of signature. Shorty 0 terminated.
841
842    cmp  w17, #'F'          // is this a float?
843    bne .LisDouble2
844
845    cmp x15, # 8*12         // Skip this load if all registers full.
846    beq .Ladvance4_2
847
848    add x17, x13, x15       // Calculate subroutine to jump to.
849    br  x17
850
851.LisDouble2:
852    cmp w17, #'D'           // is this a double?
853    bne .LisLong2
854
855    cmp x15, # 8*12         // Skip this load if all registers full.
856    beq .Ladvance8_2
857
858    add x17, x14, x15       // Calculate subroutine to jump to.
859    br x17
860
861.LisLong2:
862    cmp w17, #'J'           // is this a long?
863    bne .LisOther2
864
865    cmp x8, # 7*12          // Skip this load if all registers full.
866    beq .Ladvance8_2
867
868    add x17, x12, x8        // Calculate subroutine to jump to.
869    br x17
870
871.LisOther2:                 // Everything else takes one vReg.
872    cmp x8, # 7*12          // Skip this load if all registers full.
873    beq .Ladvance4_2
874
875    add x17, x11, x8        // Calculate subroutine to jump to.
876    br x17
877
878.Ladvance4_2:
879    add x9, x9, #4
880    b .LfillRegisters2
881
882.Ladvance8_2:
883    add x9, x9, #8
884    b .LfillRegisters2
885
886// Store ints.
887.LstoreW1_2:
888    LOADREG x8 4 w1 .LfillRegisters2
889    LOADREG x8 4 w2 .LfillRegisters2
890    LOADREG x8 4 w3 .LfillRegisters2
891    LOADREG x8 4 w4 .LfillRegisters2
892    LOADREG x8 4 w5 .LfillRegisters2
893    LOADREG x8 4 w6 .LfillRegisters2
894    LOADREG x8 4 w7 .LfillRegisters2
895
896// Store longs.
897.LstoreX1_2:
898    LOADREG x8 8 x1 .LfillRegisters2
899    LOADREG x8 8 x2 .LfillRegisters2
900    LOADREG x8 8 x3 .LfillRegisters2
901    LOADREG x8 8 x4 .LfillRegisters2
902    LOADREG x8 8 x5 .LfillRegisters2
903    LOADREG x8 8 x6 .LfillRegisters2
904    LOADREG x8 8 x7 .LfillRegisters2
905
906// Store singles.
907.LstoreS0_2:
908    LOADREG x15 4 s0 .LfillRegisters2
909    LOADREG x15 4 s1 .LfillRegisters2
910    LOADREG x15 4 s2 .LfillRegisters2
911    LOADREG x15 4 s3 .LfillRegisters2
912    LOADREG x15 4 s4 .LfillRegisters2
913    LOADREG x15 4 s5 .LfillRegisters2
914    LOADREG x15 4 s6 .LfillRegisters2
915    LOADREG x15 4 s7 .LfillRegisters2
916
917// Store doubles.
918.LstoreD0_2:
919    LOADREG x15 8 d0 .LfillRegisters2
920    LOADREG x15 8 d1 .LfillRegisters2
921    LOADREG x15 8 d2 .LfillRegisters2
922    LOADREG x15 8 d3 .LfillRegisters2
923    LOADREG x15 8 d4 .LfillRegisters2
924    LOADREG x15 8 d5 .LfillRegisters2
925    LOADREG x15 8 d6 .LfillRegisters2
926    LOADREG x15 8 d7 .LfillRegisters2
927
928
929.LcallFunction2:
930
931    INVOKE_STUB_CALL_AND_RETURN
932
933END art_quick_invoke_static_stub
934
935
936
937    /*
938     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_
939     */
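    /*
     * Layout expected by the loads below: gprs_ holds X0..X30 followed by one extra
     * slot that is restored into SP, and fprs_ holds D0..D31. Control transfers to
     * whatever value the context placed in the LR (X30) slot.
     */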
940
941ENTRY art_quick_do_long_jump
942    // Load FPRs
943    ldp d0, d1, [x1], #16
944    ldp d2, d3, [x1], #16
945    ldp d4, d5, [x1], #16
946    ldp d6, d7, [x1], #16
947    ldp d8, d9, [x1], #16
948    ldp d10, d11, [x1], #16
949    ldp d12, d13, [x1], #16
950    ldp d14, d15, [x1], #16
951    ldp d16, d17, [x1], #16
952    ldp d18, d19, [x1], #16
953    ldp d20, d21, [x1], #16
954    ldp d22, d23, [x1], #16
955    ldp d24, d25, [x1], #16
956    ldp d26, d27, [x1], #16
957    ldp d28, d29, [x1], #16
958    ldp d30, d31, [x1]
959
960    // Load GPRs
961    // TODO: lots of those are smashed, could optimize.
962    add x0, x0, #30*8
963    ldp x30, x1, [x0], #-16
964    ldp x28, x29, [x0], #-16
965    ldp x26, x27, [x0], #-16
966    ldp x24, x25, [x0], #-16
967    ldp x22, x23, [x0], #-16
968    ldp x20, x21, [x0], #-16
969    ldp x18, x19, [x0], #-16
970    ldp x16, x17, [x0], #-16
971    ldp x14, x15, [x0], #-16
972    ldp x12, x13, [x0], #-16
973    ldp x10, x11, [x0], #-16
974    ldp x8, x9, [x0], #-16
975    ldp x6, x7, [x0], #-16
976    ldp x4, x5, [x0], #-16
977    ldp x2, x3, [x0], #-16
978    mov sp, x1
979
980    // TODO: Is it really OK to use LR for the target PC?
981    mov x0, #0
982    mov x1, #0
983    br  xLR
984END art_quick_do_long_jump
985
986    /*
987     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
988     * failure.
989     */
990    .extern artHandleFillArrayDataFromCode
991ENTRY art_quick_handle_fill_data
992    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case exception allocation triggers GC.
993    mov    x2, xSELF                       // Pass Thread::Current.
994    mov    x3, sp                          // Pass SP.
995    bl     artHandleFillArrayDataFromCode  // (Array*, const DexFile::Payload*, Thread*, SP)
996    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
997    RETURN_IF_RESULT_IS_ZERO
998    DELIVER_PENDING_EXCEPTION
999END art_quick_handle_fill_data
1000
1001    /*
1002     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
1003     * possibly null object to lock.
1004     *
1005     * Derived from arm32 code.
1006     */
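    /*
     * Thin lock word layout assumed by the fast path below (inferred from the bit
     * tests that follow):
     *   bits 31-30: lock state (00 = thin or unlocked)
     *   bits 29-16: recursion count for a thin lock
     *   bits 15-0:  owner thread id (0 when unlocked)
     * A non-zero state, an owner other than self, or a count overflow all take the
     * artLockObjectFromCode slow path.
     */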
1007    .extern artLockObjectFromCode
1008ENTRY art_quick_lock_object
1009    cbz    w0, .Lslow_lock
    add    x4, x0, #LOCK_WORD_OFFSET  // exclusive load/store has no immediate offset form
1011.Lretry_lock:
1012    ldr    w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
1013    ldxr   w1, [x4]
1014    cbnz   w1, .Lnot_unlocked         // already thin locked
1015    stxr   w3, w2, [x4]
1016    cbnz   w3, .Lstrex_fail           // store failed, retry
    dmb    ishld                      // LoadLoad/LoadStore (acquire) barrier, TODO: acquire-release
1018    ret
1019.Lstrex_fail:
1020    b .Lretry_lock                    // unlikely forward branch, need to reload and recheck r1/r2
1021.Lnot_unlocked:
1022    lsr    w3, w1, 30
1023    cbnz   w3, .Lslow_lock            // if either of the top two bits are set, go slow path
1024    eor    w2, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
1025    uxth   w2, w2                     // zero top 16 bits
1026    cbnz   w2, .Lslow_lock            // lock word and self thread id's match -> recursive lock
1027                                      // else contention, go to slow path
1028    add    w2, w1, #65536             // increment count in lock word placing in w2 for storing
1029    lsr    w1, w2, 30                 // if either of the top two bits are set, we overflowed.
1030    cbnz   w1, .Lslow_lock            // if we overflow the count go slow path
1031    str    w2, [x0, #LOCK_WORD_OFFSET]// no need for stxr as we hold the lock
1032    ret
1033.Lslow_lock:
1034    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case we block
1035    mov    x1, xSELF                  // pass Thread::Current
1036    mov    x2, sp                     // pass SP
1037    bl     artLockObjectFromCode      // (Object* obj, Thread*, SP)
1038    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1039    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1040END art_quick_lock_object
1041
1042    /*
1043     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
1044     * x0 holds the possibly null object to lock.
1045     *
1046     * Derived from arm32 code.
1047     */
1048    .extern artUnlockObjectFromCode
1049ENTRY art_quick_unlock_object
1050    cbz    x0, .Lslow_unlock
1051    ldr    w1, [x0, #LOCK_WORD_OFFSET]
1052    lsr    w2, w1, 30
1053    cbnz   w2, .Lslow_unlock          // if either of the top two bits are set, go slow path
1054    ldr    w2, [xSELF, #THREAD_ID_OFFSET]
1055    eor    w3, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
1056    uxth   w3, w3                     // zero top 16 bits
1057    cbnz   w3, .Lslow_unlock          // do lock word and self thread id's match?
1058    cmp    w1, #65536
1059    bpl    .Lrecursive_thin_unlock
1060    // transition to unlocked, w3 holds 0
1061    dmb    ish                        // full (StoreLoad) memory barrier
1062    str    w3, [x0, #LOCK_WORD_OFFSET]
1063    ret
1064.Lrecursive_thin_unlock:
1065    sub    w1, w1, #65536
1066    str    w1, [x0, #LOCK_WORD_OFFSET]
1067    ret
1068.Lslow_unlock:
1069    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case exception allocation triggers GC
1070    mov    x1, xSELF                  // pass Thread::Current
1071    mov    x2, sp                     // pass SP
1072    bl     artUnlockObjectFromCode    // (Object* obj, Thread*, SP)
1073    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1074    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1075END art_quick_unlock_object
1076
1077    /*
1078     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
1079     * artThrowClassCastException.
1080     */
1081    .extern artThrowClassCastException
1082ENTRY art_quick_check_cast
1083    // Store arguments and link register
1084    sub sp, sp, #32                     // Stack needs to be 16b aligned on calls
1085    .cfi_adjust_cfa_offset 32
1086    stp x0, x1, [sp]
1087    .cfi_rel_offset x0, 0
1088    .cfi_rel_offset x1, 8
1089    stp xSELF, xLR, [sp, #16]
1090    .cfi_rel_offset x18, 16
1091    .cfi_rel_offset x30, 24
1092
1093    // Call runtime code
1094    bl artIsAssignableFromCode
1095
1096    // Check for exception
1097    cbz x0, .Lthrow_class_cast_exception
1098
1099    // Restore and return
1100    ldp x0, x1, [sp]
1101    .cfi_restore x0
1102    .cfi_restore x1
1103    ldp xSELF, xLR, [sp, #16]
1104    .cfi_restore x18
1105    .cfi_restore x30
1106    add sp, sp, #32
1107    .cfi_adjust_cfa_offset -32
1108    ret
1109
1110.Lthrow_class_cast_exception:
1111    // Restore
1112    ldp x0, x1, [sp]
1113    .cfi_restore x0
1114    .cfi_restore x1
1115    ldp xSELF, xLR, [sp, #16]
1116    .cfi_restore x18
1117    .cfi_restore x30
1118    add sp, sp, #32
1119    .cfi_adjust_cfa_offset -32
1120
1121    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
1122    mov x2, xSELF                     // pass Thread::Current
1123    mov x3, sp                        // pass SP
1124    b artThrowClassCastException      // (Class*, Class*, Thread*, SP)
1125    brk 0                             // We should not return here...
1126END art_quick_check_cast
1127
1128    /*
1129     * Entry from managed code for array put operations of objects where the value being stored
1130     * needs to be checked for compatibility.
1131     * x0 = array, x1 = index, x2 = value
1132     *
1133     * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We
1134     * assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by
1135     * using index-zero-extension in load/stores.
1136     *
1137     * Temporaries: x3, x4
1138     * TODO: x4 OK? ip seems wrong here.
1139     */
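    /*
     * After a reference store, the code below marks the card for the holding array:
     * card_table[array_address >> 7] is written with the low byte of the card table
     * base, which is assumed to be biased so that this byte equals the dirty-card
     * value (the usual ART card-marking trick).
     */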
1140ENTRY art_quick_aput_obj_with_null_and_bound_check
1141    tst x0, x0
1142    bne art_quick_aput_obj_with_bound_check
1143    b art_quick_throw_null_pointer_exception
1144END art_quick_aput_obj_with_null_and_bound_check
1145
1146ENTRY art_quick_aput_obj_with_bound_check
1147    ldr w3, [x0, #ARRAY_LENGTH_OFFSET]
1148    cmp w3, w1
1149    bhi art_quick_aput_obj
1150    mov x0, x1
1151    mov x1, x3
1152    b art_quick_throw_array_bounds
1153END art_quick_aput_obj_with_bound_check
1154
1155ENTRY art_quick_aput_obj
1156    cbz x2, .Ldo_aput_null
1157    ldr w3, [x0, #CLASS_OFFSET]                          // Heap reference = 32b
1158                                                         // This also zero-extends to x3
1159    ldr w4, [x2, #CLASS_OFFSET]                          // Heap reference = 32b
1160                                                         // This also zero-extends to x4
1161    ldr w3, [x3, #CLASS_COMPONENT_TYPE_OFFSET]           // Heap reference = 32b
1162                                                         // This also zero-extends to x3
1163    cmp w3, w4  // value's type == array's component type - trivial assignability
1164    bne .Lcheck_assignability
1165.Ldo_aput:
1166    add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
1167                                                         // "Compress" = do nothing
1168    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
1169    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
1170    lsr x0, x0, #7
1171    strb w3, [x3, x0]
1172    ret
1173.Ldo_aput_null:
1174    add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
1175                                                         // "Compress" = do nothing
1176    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
1177    ret
1178.Lcheck_assignability:
1179    // Store arguments and link register
1180    sub sp, sp, #48                     // Stack needs to be 16b aligned on calls
1181    .cfi_adjust_cfa_offset 48
1182    stp x0, x1, [sp]
1183    .cfi_rel_offset x0, 0
1184    .cfi_rel_offset x1, 8
1185    stp x2, xSELF, [sp, #16]
1186    .cfi_rel_offset x2, 16
1187    .cfi_rel_offset x18, 24
1188    str xLR, [sp, #32]
1189    .cfi_rel_offset x30, 32
1190
1191    // Call runtime code
1192    mov x0, x3              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
1193    mov x1, x4              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
1194    bl artIsAssignableFromCode
1195
1196    // Check for exception
1197    cbz x0, .Lthrow_array_store_exception
1198
1199    // Restore
1200    ldp x0, x1, [sp]
1201    .cfi_restore x0
1202    .cfi_restore x1
1203    ldp x2, xSELF, [sp, #16]
1204    .cfi_restore x2
1205    .cfi_restore x18
1206    ldr xLR, [sp, #32]
1207    .cfi_restore x30
1208    add sp, sp, #48
1209    .cfi_adjust_cfa_offset -48
1210
1211    add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
1212                                                          // "Compress" = do nothing
1213    str w2, [x3, x1, lsl #2]                              // Heap reference = 32b
1214    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
1215    lsr x0, x0, #7
1216    strb w3, [x3, x0]
1217    ret
1218.Lthrow_array_store_exception:
1219    ldp x0, x1, [sp]
1220    .cfi_restore x0
1221    .cfi_restore x1
1222    ldp x2, xSELF, [sp, #16]
1223    .cfi_restore x2
1224    .cfi_restore x18
1225    ldr xLR, [sp, #32]
1226    .cfi_restore x30
1227    add sp, sp, #48
1228    .cfi_adjust_cfa_offset -48
1229
1230    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
1231    mov x1, x2                    // Pass value.
1232    mov x2, xSELF                 // Pass Thread::Current.
1233    mov x3, sp                    // Pass SP.
1234    b artThrowArrayStoreException // (Object*, Object*, Thread*, SP).
1235    brk 0                         // Unreached.
1236END art_quick_aput_obj
1237
1238// Macro to facilitate adding new allocation entrypoints.
1239.macro TWO_ARG_DOWNCALL name, entrypoint, return
1240    .extern \entrypoint
1241ENTRY \name
1242    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1243    mov    x2, xSELF                  // pass Thread::Current
1244    mov    x3, sp                     // pass SP
1245    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*, SP)
1246    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1247    \return
1248    DELIVER_PENDING_EXCEPTION
1249END \name
1250.endm
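
// The \return macro (e.g. RETURN_IF_RESULT_IS_NON_ZERO) returns on success and falls
// through on failure, so DELIVER_PENDING_EXCEPTION is only reached when the entrypoint
// signalled failure, in which case an exception is expected to be pending.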
1251
1252// Macro to facilitate adding new array allocation entrypoints.
1253.macro THREE_ARG_DOWNCALL name, entrypoint, return
1254    .extern \entrypoint
1255ENTRY \name
1256    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1257    mov    x3, xSELF                  // pass Thread::Current
1258    mov    x4, sp                     // pass SP
1259    bl     \entrypoint
1260    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1261    \return
1262    DELIVER_PENDING_EXCEPTION
1263END \name
1264.endm
1265
// Macros that take advantage of code similarities for downcalls with a referrer.
1267
1268// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
1269.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
1270    .extern \entrypoint
1271ENTRY \name
1272    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1273    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1274    mov    x2, xSELF                  // pass Thread::Current
1275    mov    x3, sp                     // pass SP
1276    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*, SP)
1277    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1278    \return
1279END \name
1280.endm
1281
1282// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
1283.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
1284    .extern \entrypoint
1285ENTRY \name
1286    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1287    ldr    x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1288    mov    x3, xSELF                  // pass Thread::Current
1289    mov    x4, sp                     // pass SP
1290    bl     \entrypoint
1291    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1292    \return
1293END \name
1294.endm
1295
1296// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
1297.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
1298    .extern \entrypoint
1299ENTRY \name
1300    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1301    ldr    x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1302    mov    x4, xSELF                  // pass Thread::Current
1303    mov    x5, sp                     // pass SP
1304    bl     \entrypoint
1305    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1306    \return
1307END \name
1308.endm
1309
1310    /*
     * Entry from managed code when static storage is uninitialized; this stub will run the class
1312     * initializer and deliver the exception on error. On success the static storage base is
1313     * returned.
1314     */
1315TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
1316
1317UNIMPLEMENTED art_quick_initialize_type
1318UNIMPLEMENTED art_quick_initialize_type_and_verify_access
1319
1320ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1321ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1322ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1323
1324TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1325TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1326TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1327
1328TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1329TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1330
1331THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1332THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1333THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1334
1335// This is separated out as the argument order is different.
1336    .extern artSet64StaticFromCode
1337ENTRY art_quick_set64_static
1338    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1339    mov    x3, x1                     // Store value
1340    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1341    mov    x2, x3                     // Put value param
1342    mov    x3, xSELF                  // pass Thread::Current
1343    mov    x4, sp                     // pass SP
1344    bl     artSet64StaticFromCode
1345    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
1346    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1347END art_quick_set64_static
1348
1349    /*
1350     * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
1351     * exception on error. On success the String is returned. x0 holds the referring method,
     * w1 holds the string index. The fast-path check for a hit in the strings cache has already
     * been performed.
1354     */
1355TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
1356
1357// Generate the allocation entrypoints for each allocator.
1358GENERATE_ALL_ALLOC_ENTRYPOINTS
1359
1360UNIMPLEMENTED art_quick_test_suspend
1361
1362     /*
1363     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * x0 holds the proxy method and x1 holds the receiver; the frame size of the invoked proxy
1365     * method agrees with a ref and args callee save frame.
1366     */
1367     .extern artQuickProxyInvokeHandler
1368ENTRY art_quick_proxy_invoke_handler
1369    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
1370    str     x0, [sp, #0]                // place proxy method at bottom of frame
1371    mov     x2, xSELF                   // pass Thread::Current
1372    mov     x3, sp                      // pass SP
1373    bl      artQuickProxyInvokeHandler  // (Method* proxy method, receiver, Thread*, SP)
1374    ldr     xSELF, [sp, #200]           // Restore self pointer.
1375    ldr     x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz    x2, .Lexception_in_proxy    // branch to exception delivery if an exception is pending
1377    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME_NO_D0 // keep d0
1378    ret                                 // return on success
1379.Lexception_in_proxy:
1380    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
1381    DELIVER_PENDING_EXCEPTION
1382END art_quick_proxy_invoke_handler
1383
1384UNIMPLEMENTED art_quick_imt_conflict_trampoline
1385
1386
1387ENTRY art_quick_resolution_trampoline
1388    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
1389    mov x2, xSELF
1390    mov x3, sp
1391    bl artQuickResolutionTrampoline  // (called, receiver, Thread*, SP)
1392    cbz x0, 1f
1393    mov x9, x0              // Remember returned code pointer in x9.
1394    ldr x0, [sp, #0]        // artQuickResolutionTrampoline puts called method in *SP.
1395    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
1396    br x9
13971:
1398    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
1399    DELIVER_PENDING_EXCEPTION
1400END art_quick_resolution_trampoline
1401
1402/*
1403 * Generic JNI frame layout:
1404 *
1405 * #-------------------#
1406 * |                   |
1407 * | caller method...  |
1408 * #-------------------#    <--- SP on entry
1409 * | Return X30/LR     |
1410 * | X29/FP            |    callee save
1411 * | X28               |    callee save
1412 * | X27               |    callee save
1413 * | X26               |    callee save
1414 * | X25               |    callee save
1415 * | X24               |    callee save
1416 * | X23               |    callee save
1417 * | X22               |    callee save
1418 * | X21               |    callee save
1419 * | X20               |    callee save
1420 * | X19               |    callee save
1421 * | X7                |    arg7
1422 * | X6                |    arg6
1423 * | X5                |    arg5
1424 * | X4                |    arg4
1425 * | X3                |    arg3
1426 * | X2                |    arg2
1427 * | X1                |    arg1
 * | D15               |    callee save
 * | D14               |    callee save
 * | D13               |    callee save
1431 * | D12               |    callee save
1432 * | D11               |    callee save
1433 * | D10               |    callee save
1434 * | D9                |    callee save
1435 * | D8                |    callee save
1436 * | D7                |    float arg 8
1437 * | D6                |    float arg 7
1438 * | D5                |    float arg 6
1439 * | D4                |    float arg 5
1440 * | D3                |    float arg 4
1441 * | D2                |    float arg 3
1442 * | D1                |    float arg 2
1443 * | D0                |    float arg 1
 * | X0/Method*        |
1445 * #-------------------#
1446 * | local ref cookie  | // 4B
1447 * |   SIRT size       | // 4B
1448 * #-------------------#
1449 * | JNI Call Stack    |
1450 * #-------------------#    <--- SP on native call
1451 * |                   |
1452 * | Stack for Regs    |    The trampoline assembly will pop these values
1453 * |                   |    into registers for native call
1454 * #-------------------#
1455 * | Native code ptr   |
1456 * #-------------------#
1457 * | Free scratch      |
1458 * #-------------------#
1459 * | Ptr to (1)        |    <--- SP
1460 * #-------------------#
1461 */
1462    /*
1463     * Called to do a generic JNI down-call
1464     */
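    /*
     * Handshake with the C++ side as used below (a summary of this assembly, not a
     * spec): artQuickGenericJniTrampoline(Thread*, SP of the managed frame) builds
     * the SIRT and the native call area inside the 5K reservation; it returns a
     * negative value on error, otherwise an offset used to release the unused part
     * of the reservation. The retained area starts with the native code pointer,
     * followed by the GPR (x0-x7) and FPR (d0-d7) arguments that are popped into
     * registers before the call. Afterwards
     * artQuickGenericJniEndTrampoline(Thread*, SP, result, result_f) produces the
     * final return value.
     */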
1465ENTRY art_quick_generic_jni_trampoline
1466    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
1467    str x0, [sp, #0]  // Store native ArtMethod* to bottom of stack.
1468
    // Save SP, so we can have static CFI info.
1470    mov x28, sp
1471    .cfi_def_cfa_register x28
1472
1473    // This looks the same, but is different: this will be updated to point to the bottom
1474    // of the frame when the SIRT is inserted.
1475    mov xFP, sp
1476
1477    mov x8, #5120
1478    sub sp, sp, x8
1479
1480    // prepare for artQuickGenericJniTrampoline call
1481    // (Thread*,  SP)
1482    //    x0      x1   <= C calling convention
1483    //   xSELF    xFP  <= where they are
1484
1485    mov x0, xSELF   // Thread*
1486    mov x1, xFP
1487    bl artQuickGenericJniTrampoline  // (Thread*, sp)
1488
1489    // Get the updated pointer. This is the bottom of the frame _with_ SIRT.
1490    ldr xFP, [sp]
1491    add x9, sp, #8
1492
1493    cmp x0, #0
1494    b.mi .Lentry_error      // Check for error, negative value.
1495
1496    // release part of the alloca.
1497    add x9, x9, x0
1498
1499    // Get the code pointer
1500    ldr xIP0, [x9, #0]
1501
1502    // Load parameters from frame into registers.
1503    // TODO Check with artQuickGenericJniTrampoline.
    //      Also, check against AAPCS64 - the stack arguments are interleaved.
1505    ldp x0, x1, [x9, #8]
1506    ldp x2, x3, [x9, #24]
1507    ldp x4, x5, [x9, #40]
1508    ldp x6, x7, [x9, #56]
1509
1510    ldp d0, d1, [x9, #72]
1511    ldp d2, d3, [x9, #88]
1512    ldp d4, d5, [x9, #104]
1513    ldp d6, d7, [x9, #120]
1514
1515    add sp, x9, #136
1516
1517    blr xIP0           // native call.
1518
1519    // Restore self pointer.
1520    ldr xSELF, [x28, #200]
1521
1522    // result sign extension is handled in C code
1523    // prepare for artQuickGenericJniEndTrampoline call
1524    // (Thread*,  SP, result, result_f)
1525    //   x0       x1   x2       x3       <= C calling convention
1526    mov x5, x0      // Save return value
1527    mov x0, xSELF   // Thread register
1528    mov x1, xFP     // Stack pointer
1529    mov x2, x5      // Result (from saved)
1530    fmov x3, d0     // d0 will contain floating point result, but needs to go into x3
1531
1532    bl artQuickGenericJniEndTrampoline
1533
1534    // Tear down the alloca.
1535    mov sp, x28
1536    .cfi_def_cfa_register sp
1537
1538    // Restore self pointer.
1539    ldr xSELF, [x28, #200]
1540
1541    // Pending exceptions possible.
1542    ldr x1, [xSELF, THREAD_EXCEPTION_OFFSET]
1543    cbnz x1, .Lexception_in_native
1544
1545    // Tear down the callee-save frame.
1546    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
1547
1548    // store into fpr, for when it's a fpr return...
1549    fmov d0, x0
1550    ret
1551
1552.Lentry_error:
1553    mov sp, x28
1554    .cfi_def_cfa_register sp
1555    ldr xSELF, [x28, #200]
1556.Lexception_in_native:
1557    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
1558    DELIVER_PENDING_EXCEPTION
1559
1560END art_quick_generic_jni_trampoline
1561
1562/*
1563 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those
1564 * of a quick call:
1565 * x0 = method being called/to bridge to.
1566 * x1..x7, d0..d7 = arguments to that method.
1567 */
1568ENTRY art_quick_to_interpreter_bridge
1569    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // Set up frame and save arguments.
1570
1571    //  x0 will contain mirror::ArtMethod* method.
    mov x1, xSELF                          // Pass Thread::Current(), held in xSELF.
1573    mov x2, sp
1574
1575    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
1576    //                                      mirror::ArtMethod** sp)
1577    bl   artQuickToInterpreterBridge
1578
1579    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME  // TODO: no need to restore arguments in this case.
1580
1581    fmov d0, x0
1582
1583    RETURN_OR_DELIVER_PENDING_EXCEPTION
1584END art_quick_to_interpreter_bridge
1585
1586UNIMPLEMENTED art_quick_instrumentation_entry
1587UNIMPLEMENTED art_quick_instrumentation_exit
1588UNIMPLEMENTED art_quick_deoptimize
1589UNIMPLEMENTED art_quick_mul_long
1590UNIMPLEMENTED art_quick_shl_long
1591UNIMPLEMENTED art_quick_shr_long
1592UNIMPLEMENTED art_quick_ushr_long
1593UNIMPLEMENTED art_quick_indexof
1594
1595   /*
1596     * String's compareTo.
1597     *
1598     * TODO: Not very optimized.
1599     *
1600     * On entry:
1601     *    x0:   this object pointer
1602     *    x1:   comp object pointer
1603     *
1604     */
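    /*
     * Strategy used below: identical references compare equal immediately; otherwise
     * the length difference is the default result, up to min(count1, count2)
     * characters are compared (unrolled two at a time), and strings longer than 28
     * characters are handed to __memcmp16 instead.
     */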
1605    .extern __memcmp16
1606ENTRY art_quick_string_compareto
1607    mov    x2, x0         // x0 is return, use x2 for first input.
1608    sub    x0, x2, x1     // Same string object?
1609    cbnz   x0,1f
1610    ret
16111:                        // Different string objects.
1612
1613    ldr    w6, [x2, #STRING_OFFSET_OFFSET]
1614    ldr    w5, [x1, #STRING_OFFSET_OFFSET]
1615    ldr    w4, [x2, #STRING_COUNT_OFFSET]
1616    ldr    w3, [x1, #STRING_COUNT_OFFSET]
1617    ldr    w2, [x2, #STRING_VALUE_OFFSET]
1618    ldr    w1, [x1, #STRING_VALUE_OFFSET]
1619
1620    /*
1621     * Now:           CharArray*    Offset   Count
1622     *    first arg      x2          w6        w4
1623     *   second arg      x1          w5        w3
1624     */
1625
1626    // x0 := str1.length(w4) - str2.length(w3). ldr zero-extended w3/w4 into x3/x4.
1627    subs x0, x4, x3
1628    // Min(count1, count2) into w3.
1629    csel x3, x3, x4, ge
1630
1631    // Build pointer into string data.
1632
1633    // Add offset in array (substr etc.) (sign extend and << 1).
1634    add x2, x2, w6, sxtw #1
1635    add x1, x1, w5, sxtw #1
1636
1637    // Add offset in CharArray to array.
1638    add x2, x2, #STRING_DATA_OFFSET
1639    add x1, x1, #STRING_DATA_OFFSET
1640
1641    // Check for long string, do memcmp16 for them.
1642    cmp w3, #28  // Constant from arm32.
1643    bgt .Ldo_memcmp16
1644
1645    /*
1646     * Now:
1647     *   x2: *first string data
1648     *   x1: *second string data
1649     *   w3: iteration count
1650     *   x0: return value if comparison equal
1651     *   x4, x5, x6, x7: free
1652     */
1653
1654    // Do a simple unrolled loop.
1655.Lloop:
1656    // At least two more elements?
1657    subs w3, w3, #2
1658    b.lt .Lremainder_or_done
1659
1660    ldrh w4, [x2], #2
1661    ldrh w5, [x1], #2
1662
1663    ldrh w6, [x2], #2
1664    ldrh w7, [x1], #2
1665
1666    subs w4, w4, w5
1667    b.ne .Lw4_result
1668
1669    subs w6, w6, w7
1670    b.ne .Lw6_result
1671
1672    b .Lloop
1673
1674.Lremainder_or_done:
1675    adds w3, w3, #1
1676    b.eq .Lremainder
1677    ret
1678
1679.Lremainder:
1680    ldrh w4, [x2], #2
1681    ldrh w5, [x1], #2
1682    subs w4, w4, w5
1683    b.ne .Lw4_result
1684    ret
1685
1686// Result is in w4
1687.Lw4_result:
1688    sxtw x0, w4
1689    ret
1690
1691// Result is in w6
1692.Lw6_result:
1693    sxtw x0, w6
1694    ret
1695
1696.Ldo_memcmp16:
1697    str x0, [sp,#-16]!           // Save x0
1698
1699    mov x0, x2
1700    uxtw x2, w3
1701    bl __memcmp16
1702
1703    ldr x1, [sp], #16            // Restore old x0 = length diff
1704
1705    cmp x0, #0                   // Check the memcmp difference
1706    csel x0, x0, x1, ne          // x0 := x0 != 0 ? x0 : x1
1707    ret
1708END art_quick_string_compareto
1709