quick_entrypoints_arm64.S revision 3d21bdf8894e780d349c481e5c9e29fe1556051c
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_arm64.S"

#include "arch/quick_alloc_entrypoints.S"


    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveAll).
     */
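    /*
     * Frame layout, 176 bytes, derived from the stores below (offsets from
     * the new SP):
     *   [sp, #0]    ArtMethod* (kSaveAll callee-save method)
     *   [sp, #8]    d8-d15 (FP callee-saves)
     *   [sp, #72]   x18 (xSELF) and x19
     *   [sp, #88]   x20-x29 (callee-saves)
     *   [sp, #168]  x30 (LR)
     */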
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_, i.e. the art::Runtime*.

    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kSaveAll].
    THIS_LOAD_REQUIRES_READ_BARRIER
    // Loads appropriate callee-save-method.
    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET]

    sub sp, sp, #176
    .cfi_adjust_cfa_offset 176

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // FP callee-saves.
    stp d8, d9,   [sp, #8]
    stp d10, d11, [sp, #24]
    stp d12, d13, [sp, #40]
    stp d14, d15, [sp, #56]

    // Thread register (x18) and x19 (callee-save).
    stp xSELF, x19, [sp, #72]
    .cfi_rel_offset x18, 72
    .cfi_rel_offset x19, 80

    // Callee-saves.
    stp x20, x21, [sp, #88]
    .cfi_rel_offset x20, 88
    .cfi_rel_offset x21, 96

    stp x22, x23, [sp, #104]
    .cfi_rel_offset x22, 104
    .cfi_rel_offset x23, 112

    stp x24, x25, [sp, #120]
    .cfi_rel_offset x24, 120
    .cfi_rel_offset x25, 128

    stp x26, x27, [sp, #136]
    .cfi_rel_offset x26, 136
    .cfi_rel_offset x27, 144

    stp x28, x29, [sp, #152]
    .cfi_rel_offset x28, 152
    .cfi_rel_offset x29, 160

    str xLR, [sp, #168]
    .cfi_rel_offset x30, 168

    // Store the callee-save method at the bottom of the frame.
    str xIP0, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAll].
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsOnly).
     */
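    /*
     * Frame layout, 112 bytes, derived from the stores below (offsets from
     * the new SP):
     *   [sp, #0]    ArtMethod* (kRefsOnly callee-save method)
     *   [sp, #8]    padding (keeps the frame 16-byte aligned)
     *   [sp, #16]   x19-x28 (callee-saves)
     *   [sp, #96]   x29 (FP) and x30 (LR)
     */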
.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_, i.e. the art::Runtime*.

    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsOnly].
    THIS_LOAD_REQUIRES_READ_BARRIER
    // Loads appropriate callee-save-method.
    ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET]

    sub sp, sp, #112
    .cfi_adjust_cfa_offset 112

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 112)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // Callee-saves.
    stp x19, x20, [sp, #16]
    .cfi_rel_offset x19, 16
    .cfi_rel_offset x20, 24

    stp x21, x22, [sp, #32]
    .cfi_rel_offset x21, 32
    .cfi_rel_offset x22, 40

    stp x23, x24, [sp, #48]
    .cfi_rel_offset x23, 48
    .cfi_rel_offset x24, 56

    stp x25, x26, [sp, #64]
    .cfi_rel_offset x25, 64
    .cfi_rel_offset x26, 72

    stp x27, x28, [sp, #80]
    .cfi_rel_offset x27, 80
    .cfi_rel_offset x28, 88

    // x29 (callee-save) and LR.
    stp x29, xLR, [sp, #96]
    .cfi_rel_offset x29, 96
    .cfi_rel_offset x30, 104

    // Save xSELF to xETR.
    mov xETR, xSELF

    // Store the callee-save method at the bottom of the frame.
    str xIP0, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly].
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    // Restore xSELF.
    mov xSELF, xETR

    // Callee-saves.
    ldp x19, x20, [sp, #16]
    .cfi_restore x19
    .cfi_restore x20

    ldp x21, x22, [sp, #32]
    .cfi_restore x21
    .cfi_restore x22

    ldp x23, x24, [sp, #48]
    .cfi_restore x23
    .cfi_restore x24

    ldp x25, x26, [sp, #64]
    .cfi_restore x25
    .cfi_restore x26

    ldp x27, x28, [sp, #80]
    .cfi_restore x27
    .cfi_restore x28

    // x29 (callee-save) and LR.
    ldp x29, xLR, [sp, #96]
    .cfi_restore x29
    .cfi_restore x30

    add sp, sp, #112
    .cfi_adjust_cfa_offset -112
.endm

.macro POP_REFS_ONLY_CALLEE_SAVE_FRAME
    // Restore xSELF as it might be scratched.
    mov xSELF, xETR
    // Reload xETR (x21, spilled at [sp, #32]).
    ldr xETR, [sp, #32]
    .cfi_restore x21

    add sp, sp, #112
    .cfi_adjust_cfa_offset -112
.endm

.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    ret
.endm


.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    sub sp, sp, #224
    .cfi_adjust_cfa_offset 224

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
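    // Frame layout, 224 bytes, derived from the stores below (offsets from
    // the new SP): [sp, #0] ArtMethod*, [sp, #8] d0-d7 (FP args),
    // [sp, #72] x1-x7 (core args), [sp, #128] x19-x28 (callee-saves),
    // [sp, #208] x29 (FP) and x30 (LR).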

    // FP args.
    stp d0, d1, [sp, #8]
    stp d2, d3, [sp, #24]
    stp d4, d5, [sp, #40]
    stp d6, d7, [sp, #56]

    // Core args.
    str x1, [sp, #72]
    .cfi_rel_offset x1, 72

    stp x2,  x3, [sp, #80]
    .cfi_rel_offset x2, 80
    .cfi_rel_offset x3, 88

    stp x4,  x5, [sp, #96]
    .cfi_rel_offset x4, 96
    .cfi_rel_offset x5, 104

    stp x6,  x7, [sp, #112]
    .cfi_rel_offset x6, 112
    .cfi_rel_offset x7, 120

    // Callee-saves.
    stp x19, x20, [sp, #128]
    .cfi_rel_offset x19, 128
    .cfi_rel_offset x20, 136

    stp x21, x22, [sp, #144]
    .cfi_rel_offset x21, 144
    .cfi_rel_offset x22, 152

    stp x23, x24, [sp, #160]
    .cfi_rel_offset x23, 160
    .cfi_rel_offset x24, 168

    stp x25, x26, [sp, #176]
    .cfi_rel_offset x25, 176
    .cfi_rel_offset x26, 184

    stp x27, x28, [sp, #192]
    .cfi_rel_offset x27, 192
    .cfi_rel_offset x28, 200

    // x29 (callee-save) and LR.
    stp x29, xLR, [sp, #208]
    .cfi_rel_offset x29, 208
    .cfi_rel_offset x30, 216

    // Save xSELF to xETR.
    mov xETR, xSELF
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
     *
     * TODO: This is probably too conservative - saving FP & LR.
     */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_, i.e. the art::Runtime*.

    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsAndArgs].
    THIS_LOAD_REQUIRES_READ_BARRIER
    ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]

    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL

    str xIP0, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs].
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    str x0, [sp, #0]  // Store ArtMethod* to bottom of stack.
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    // Restore xSELF.
    mov xSELF, xETR

    // FP args.
    ldp d0, d1, [sp, #8]
    ldp d2, d3, [sp, #24]
    ldp d4, d5, [sp, #40]
    ldp d6, d7, [sp, #56]

    // Core args.
    ldr x1, [sp, #72]
    .cfi_restore x1

    ldp x2,  x3, [sp, #80]
    .cfi_restore x2
    .cfi_restore x3

    ldp x4,  x5, [sp, #96]
    .cfi_restore x4
    .cfi_restore x5

    ldp x6,  x7, [sp, #112]
    .cfi_restore x6
    .cfi_restore x7

    // Callee-saves.
    ldp x19, x20, [sp, #128]
    .cfi_restore x19
    .cfi_restore x20

    ldp x21, x22, [sp, #144]
    .cfi_restore x21
    .cfi_restore x22

    ldp x23, x24, [sp, #160]
    .cfi_restore x23
    .cfi_restore x24

    ldp x25, x26, [sp, #176]
    .cfi_restore x25
    .cfi_restore x26

    ldp x27, x28, [sp, #192]
    .cfi_restore x27
    .cfi_restore x28

    // x29 (callee-save) and LR.
    ldp x29, xLR, [sp, #208]
    .cfi_restore x29
    .cfi_restore x30

    add sp, sp, #224
    .cfi_adjust_cfa_offset -224
.endm

.macro RETURN_IF_RESULT_IS_ZERO
    cbnz x0, 1f                // result non-zero branch over
    ret                        // return
1:
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO
    cbz x0, 1f                 // result zero branch over
    ret                        // return
1:
.endm

    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_.
     */
.macro DELIVER_PENDING_EXCEPTION
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov x0, xSELF

    // Point of no return.
    b artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*)
    brk 0  // Unreached.
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
    ldr \reg, [xSELF, #THREAD_EXCEPTION_OFFSET]   // Get exception field.
    cbnz \reg, 1f
    ret
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm

// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
.endm

.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
    cbnz w0, 1f                // result non-zero branch over
    ret                        // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
    mov x0, xSELF                     // pass Thread::Current
    b   \cxx_name                     // \cxx_name(Thread*)
END \c_name
.endm

.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context.
    mov x1, xSELF                     // pass Thread::Current.
    b   \cxx_name                     // \cxx_name(arg, Thread*).
    brk 0
END \c_name
.endm

.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
    mov x2, xSELF                     // pass Thread::Current
    b   \cxx_name                     // \cxx_name(arg1, arg2, Thread*)
    brk 0
END \c_name
.endm

    /*
     * Called by managed code, saves callee saves and then calls artThrowException
     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode

    /*
     * Called by managed code to create and deliver a NullPointerException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode

    /*
     * Called by managed code to create and deliver an ArithmeticException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
     * index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode

    /*
     * Called by managed code to create and deliver a NoSuchMethodError.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
     * the method_idx.  This wrapper will save arg1-arg3, load the caller's Method*, align the
     * stack and call the appropriate C helper.
     * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/x1.
     *
     * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
     * of the target Method* in x0 and method->code_ in x1.
     *
     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     *
     * Adapted from ARM32 code.
     *
     * Clobbers xIP0.
     */
.macro INVOKE_TRAMPOLINE c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // save callee saves in case allocation triggers GC
    // Helper signature is always
    // (method_idx, *this_object, *caller_method, *self, sp)

    ldr    x2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE]  // pass caller Method*
    mov    x3, xSELF                      // pass Thread::Current
    mov    x4, sp
    bl     \cxx_name                      // (method_idx, this, caller, Thread*, SP)
    mov    xIP0, x1                       // save Method*->code_
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    cbz    x0, 1f                         // did we find the target? if not go to exception delivery
    br     xIP0                           // tail call to target
1:
    DELIVER_PENDING_EXCEPTION
END \c_name
.endm

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck


.macro INVOKE_STUB_CREATE_FRAME

SAVE_SIZE=15*8   // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+8


    mov x9, sp                             // Save stack pointer.
    .cfi_register sp, x9

    add x10, x2, #SAVE_SIZE_AND_METHOD     // Calculate size of frame.
    sub x10, sp, x10                       // Calculate SP position - saves + ArtMethod* + args
    and x10, x10, #~0xf                    // Enforce 16-byte stack alignment.
    mov sp, x10                            // Set new SP.

    sub x10, x9, #SAVE_SIZE                // Calculate new FP (later). Done here as we must move SP
    .cfi_def_cfa_register x10              // before this.
    .cfi_adjust_cfa_offset SAVE_SIZE

    str x28, [x10, #112]
    .cfi_rel_offset x28, 112

    stp x26, x27, [x10, #96]
    .cfi_rel_offset x26, 96
    .cfi_rel_offset x27, 104

    stp x24, x25, [x10, #80]
    .cfi_rel_offset x24, 80
    .cfi_rel_offset x25, 88

    stp x22, x23, [x10, #64]
    .cfi_rel_offset x22, 64
    .cfi_rel_offset x23, 72

    stp x20, x21, [x10, #48]
    .cfi_rel_offset x20, 48
    .cfi_rel_offset x21, 56

    stp x9, x19, [x10, #32]                // Save old stack pointer and x19.
    .cfi_rel_offset sp, 32
    .cfi_rel_offset x19, 40

    stp x4, x5, [x10, #16]                 // Save result and shorty addresses.
    .cfi_rel_offset x4, 16
    .cfi_rel_offset x5, 24

    stp xFP, xLR, [x10]                    // Store FP & LR.
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8

    mov xFP, x10                           // Use xFP now, as it's callee-saved.
    .cfi_def_cfa_register x29
    mov xSELF, x3                          // Move thread pointer into SELF register.

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X1 - source address
    // W2 - args length
    // X9 - destination address.
    // W10 - temporary
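    // The loop below walks w2 (the args size in bytes) down to zero in 4-byte
    // steps, copying each 32-bit vreg slot from the end of the args array to
    // the matching offset in the outs area.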
    add x9, sp, #8                         // Destination address is bottom of stack + null.

    // Use \@ to differentiate between macro invocations.
.LcopyParams\@:
    cmp w2, #0
    beq .LendCopyParams\@
    sub w2, w2, #4      // Need 65536 bytes of range.
    ldr w10, [x1, x2]
    str w10, [x9, x2]

    b .LcopyParams\@

.LendCopyParams\@:

    // Store null into ArtMethod* at bottom of frame.
    str xzr, [sp]
.endm

.macro INVOKE_STUB_CALL_AND_RETURN

    // Load the method's quick code entry point.
    ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    // Branch to method.
    blr x9

    // Restore return value address and shorty address.
    ldp x4, x5, [xFP, #16]
    .cfi_restore x4
    .cfi_restore x5

    ldr x28, [xFP, #112]
    .cfi_restore x28

    ldp x26, x27, [xFP, #96]
    .cfi_restore x26
    .cfi_restore x27

    ldp x24, x25, [xFP, #80]
    .cfi_restore x24
    .cfi_restore x25

    ldp x22, x23, [xFP, #64]
    .cfi_restore x22
    .cfi_restore x23

    ldp x20, x21, [xFP, #48]
    .cfi_restore x20
    .cfi_restore x21

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x5]

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq .Lexit_art_quick_invoke_stub\@

    cmp w10, #'D'
    bne .Lreturn_is_float\@
    str d0, [x4]
    b .Lexit_art_quick_invoke_stub\@

.Lreturn_is_float\@:
    cmp w10, #'F'
    bne .Lreturn_is_int\@
    str s0, [x4]
    b .Lexit_art_quick_invoke_stub\@

    // Just store x0. Doesn't matter if it is 64 or 32 bits.
.Lreturn_is_int\@:
    str x0, [x4]

.Lexit_art_quick_invoke_stub\@:
    ldp x2, x19, [xFP, #32]   // Restore stack pointer and x19.
    .cfi_restore x19
    mov sp, x2
    .cfi_restore sp

    ldp xFP, xLR, [xFP]    // Restore old frame pointer and link register.
    .cfi_restore x29
    .cfi_restore x30

    ret

.endm


/*
 *  extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 *  +----------------------+
 *  |                      |
 *  |  C/C++ frame         |
 *  |       LR''           |
 *  |       FP''           | <- SP'
 *  +----------------------+
 *  +----------------------+
 *  |        x28           | <- TODO: Remove callee-saves.
 *  |         :            |
 *  |        x19           |
 *  |        SP'           |
 *  |        X5            |
 *  |        X4            |        Saved registers
 *  |        LR'           |
 *  |        FP'           | <- FP
 *  +----------------------+
 *  | uint32_t out[n-1]    |
 *  |    :      :          |        Outs
 *  | uint32_t out[0]      |
 *  | ArtMethod*           | <- SP  value=null
 *  +----------------------+
 *
 * Outgoing registers:
 *  x0    - Method*
 *  x1-x7 - integer parameters.
 *  d0-d7 - Floating point parameters.
 *  xSELF = self
 *  SP = & of ArtMethod*
 *  x1 = "this" pointer.
 *
 */
ENTRY art_quick_invoke_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr  x11, .LstoreW2
    adr  x12, .LstoreX2
    adr  x13, .LstoreS0
    adr  x14, .LstoreD0

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1         // Shorty address plus one, to skip the return type.
    ldr w1, [x9], #4        // Load "this" parameter, and increment arg pointer.

    // Loop to fill registers.
.LfillRegisters:
    ldrb w17, [x10], #1       // Load next character in signature, and increment.
    cbz w17, .LcallFunction   // Exit at end of signature. Shorty is 0-terminated.

    cmp  w17, #'F'          // is this a float?
    bne .LisDouble

    cmp x15, #8*12          // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x13, x15       // Calculate subroutine to jump to.
    br  x17

.LisDouble:
    cmp w17, #'D'           // is this a double?
    bne .LisLong

    cmp x15, #8*12          // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x14, x15       // Calculate subroutine to jump to.
    br x17

.LisLong:
    cmp w17, #'J'           // is this a long?
    bne .LisOther

    cmp x8, #6*12           // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x12, x8        // Calculate subroutine to jump to.
    br x17

.LisOther:                  // Everything else takes one vReg.
    cmp x8, #6*12           // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x11, x8        // Calculate subroutine to jump to.
    br x17

.Ladvance4:
    add x9, x9, #4
    b .LfillRegisters

.Ladvance8:
    add x9, x9, #8
    b .LfillRegisters

// Macro for loading a parameter into a register.
//  counter - the register with offset into these tables
//  size - the size of the register - 4 or 8 bytes.
//  register - the name of the register to be loaded.
//  return - the label to branch back to.
.macro LOADREG counter size register return
    ldr \register, [x9], #\size
    add \counter, \counter, 12
    b \return
.endm
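// Each LOADREG expansion is three 4-byte instructions (ldr/add/b), i.e. 12
// bytes, so x8/x15 advance by 12 per filled register and index straight into
// the tables below; the "N*12" limits above are simply the table lengths.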

// Store ints.
.LstoreW2:
    LOADREG x8 4 w2 .LfillRegisters
    LOADREG x8 4 w3 .LfillRegisters
    LOADREG x8 4 w4 .LfillRegisters
    LOADREG x8 4 w5 .LfillRegisters
    LOADREG x8 4 w6 .LfillRegisters
    LOADREG x8 4 w7 .LfillRegisters

// Store longs.
.LstoreX2:
    LOADREG x8 8 x2 .LfillRegisters
    LOADREG x8 8 x3 .LfillRegisters
    LOADREG x8 8 x4 .LfillRegisters
    LOADREG x8 8 x5 .LfillRegisters
    LOADREG x8 8 x6 .LfillRegisters
    LOADREG x8 8 x7 .LfillRegisters

// Store singles.
.LstoreS0:
    LOADREG x15 4 s0 .LfillRegisters
    LOADREG x15 4 s1 .LfillRegisters
    LOADREG x15 4 s2 .LfillRegisters
    LOADREG x15 4 s3 .LfillRegisters
    LOADREG x15 4 s4 .LfillRegisters
    LOADREG x15 4 s5 .LfillRegisters
    LOADREG x15 4 s6 .LfillRegisters
    LOADREG x15 4 s7 .LfillRegisters

// Store doubles.
.LstoreD0:
    LOADREG x15 8 d0 .LfillRegisters
    LOADREG x15 8 d1 .LfillRegisters
    LOADREG x15 8 d2 .LfillRegisters
    LOADREG x15 8 d3 .LfillRegisters
    LOADREG x15 8 d4 .LfillRegisters
    LOADREG x15 8 d5 .LfillRegisters
    LOADREG x15 8 d6 .LfillRegisters
    LOADREG x15 8 d7 .LfillRegisters


.LcallFunction:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_stub

/*  extern"C"
 *     void art_quick_invoke_static_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 */
ENTRY art_quick_invoke_static_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr  x11, .LstoreW1_2
    adr  x12, .LstoreX1_2
    adr  x13, .LstoreS0_2
    adr  x14, .LstoreD0_2

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1     // Shorty address plus one, to skip the return type.

    // Loop to fill registers.
.LfillRegisters2:
    ldrb w17, [x10], #1         // Load next character in signature, and increment.
    cbz w17, .LcallFunction2    // Exit at end of signature. Shorty is 0-terminated.

    cmp  w17, #'F'          // is this a float?
    bne .LisDouble2

    cmp x15, #8*12          // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x13, x15       // Calculate subroutine to jump to.
    br  x17

.LisDouble2:
    cmp w17, #'D'           // is this a double?
    bne .LisLong2

    cmp x15, #8*12          // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x14, x15       // Calculate subroutine to jump to.
    br x17

.LisLong2:
    cmp w17, #'J'           // is this a long?
    bne .LisOther2

    cmp x8, #7*12           // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x12, x8        // Calculate subroutine to jump to.
    br x17

.LisOther2:                 // Everything else takes one vReg.
    cmp x8, #7*12           // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x11, x8        // Calculate subroutine to jump to.
    br x17

.Ladvance4_2:
    add x9, x9, #4
    b .LfillRegisters2

.Ladvance8_2:
    add x9, x9, #8
    b .LfillRegisters2

// Store ints.
.LstoreW1_2:
    LOADREG x8 4 w1 .LfillRegisters2
    LOADREG x8 4 w2 .LfillRegisters2
    LOADREG x8 4 w3 .LfillRegisters2
    LOADREG x8 4 w4 .LfillRegisters2
    LOADREG x8 4 w5 .LfillRegisters2
    LOADREG x8 4 w6 .LfillRegisters2
    LOADREG x8 4 w7 .LfillRegisters2

// Store longs.
.LstoreX1_2:
    LOADREG x8 8 x1 .LfillRegisters2
    LOADREG x8 8 x2 .LfillRegisters2
    LOADREG x8 8 x3 .LfillRegisters2
    LOADREG x8 8 x4 .LfillRegisters2
    LOADREG x8 8 x5 .LfillRegisters2
    LOADREG x8 8 x6 .LfillRegisters2
    LOADREG x8 8 x7 .LfillRegisters2

// Store singles.
.LstoreS0_2:
    LOADREG x15 4 s0 .LfillRegisters2
    LOADREG x15 4 s1 .LfillRegisters2
    LOADREG x15 4 s2 .LfillRegisters2
    LOADREG x15 4 s3 .LfillRegisters2
    LOADREG x15 4 s4 .LfillRegisters2
    LOADREG x15 4 s5 .LfillRegisters2
    LOADREG x15 4 s6 .LfillRegisters2
    LOADREG x15 4 s7 .LfillRegisters2

// Store doubles.
.LstoreD0_2:
    LOADREG x15 8 d0 .LfillRegisters2
    LOADREG x15 8 d1 .LfillRegisters2
    LOADREG x15 8 d2 .LfillRegisters2
    LOADREG x15 8 d3 .LfillRegisters2
    LOADREG x15 8 d4 .LfillRegisters2
    LOADREG x15 8 d5 .LfillRegisters2
    LOADREG x15 8 d6 .LfillRegisters2
    LOADREG x15 8 d7 .LfillRegisters2


.LcallFunction2:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_static_stub



    /*
     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_.
     */

ENTRY art_quick_do_long_jump
    // Load FPRs
    ldp d0, d1, [x1], #16
    ldp d2, d3, [x1], #16
    ldp d4, d5, [x1], #16
    ldp d6, d7, [x1], #16
    ldp d8, d9, [x1], #16
    ldp d10, d11, [x1], #16
    ldp d12, d13, [x1], #16
    ldp d14, d15, [x1], #16
    ldp d16, d17, [x1], #16
    ldp d18, d19, [x1], #16
    ldp d20, d21, [x1], #16
    ldp d22, d23, [x1], #16
    ldp d24, d25, [x1], #16
    ldp d26, d27, [x1], #16
    ldp d28, d29, [x1], #16
    ldp d30, d31, [x1]

    // Load GPRs
    // TODO: lots of those are smashed, could optimize.
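    // gprs_ holds x0-x30 at indices 0-30 and the new SP at index 31; start at
    // the top and load downward in pairs. The value loaded into x1 alongside
    // x30 is that saved SP, which is moved into sp below.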
    add x0, x0, #30*8
    ldp x30, x1, [x0], #-16
    ldp x28, x29, [x0], #-16
    ldp x26, x27, [x0], #-16
    ldp x24, x25, [x0], #-16
    ldp x22, x23, [x0], #-16
    ldp x20, x21, [x0], #-16
    ldp x18, x19, [x0], #-16
    ldp x16, x17, [x0], #-16
    ldp x14, x15, [x0], #-16
    ldp x12, x13, [x0], #-16
    ldp x10, x11, [x0], #-16
    ldp x8, x9, [x0], #-16
    ldp x6, x7, [x0], #-16
    ldp x4, x5, [x0], #-16
    ldp x2, x3, [x0], #-16
    mov sp, x1

    // TODO: Is it really OK to use LR for the target PC?
    mov x0, #0
    mov x1, #0
    br  xLR
END art_quick_do_long_jump

    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
     * possibly null object to lock.
     *
     * Derived from arm32 code.
     */
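    /*
     * Thin lock word encoding, as implied by the LOCK_WORD_* constants used
     * below: bits 0-15 hold the owner thread id, the bits above hold the
     * recursion count, bits 28-29 the read barrier state, and the top two
     * bits the lock state (zero while thin-locked).
     */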
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    cbz    w0, .Lslow_lock
    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
.Lretry_lock:
    ldr    w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
    ldxr   w1, [x4]
    mov    x3, x1
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
    cbnz   w3, .Lnot_unlocked         // already thin locked
    // unlocked case - x1: original lock word that's zero except for the read barrier bits.
    orr    x2, x1, x2                 // x2 holds thread id with count of 0 with preserved read barrier bits
    stxr   w3, w2, [x4]
    cbnz   w3, .Llock_stxr_fail       // store failed, retry
    dmb    ishld                      // LoadLoad|LoadStore memory barrier (acquire)
    ret
.Lnot_unlocked:  // x1: original lock word
    lsr    w3, w1, LOCK_WORD_STATE_SHIFT
    cbnz   w3, .Lslow_lock            // if either of the top two bits are set, go slow path
    eor    w2, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
    uxth   w2, w2                     // zero top 16 bits
    cbnz   w2, .Lslow_lock            // thread ids don't match -> contention, go to slow path
                                      // else recursive lock, fall through to bump the count
    mov    x3, x1                     // copy the lock word to check count overflow.
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits.
    add    w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count in lock word, placing it in w2 to check overflow
    lsr    w3, w2, LOCK_WORD_READ_BARRIER_STATE_SHIFT  // if either of the upper two bits (28-29) are set, we overflowed.
    cbnz   w3, .Lslow_lock            // if the count overflowed, go slow path
    add    w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count for real
    stxr   w3, w2, [x4]
    cbnz   w3, .Llock_stxr_fail       // store failed, retry
    ret
.Llock_stxr_fail:
    b      .Lretry_lock               // retry
.Lslow_lock:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case we block
    mov    x1, xSELF                  // pass Thread::Current
    bl     artLockObjectFromCode      // (Object* obj, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object

    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
     * x0 holds the possibly null object to unlock.
     *
     * Derived from arm32 code.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    cbz    x0, .Lslow_unlock
    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
.Lretry_unlock:
#ifndef USE_READ_BARRIER
    ldr    w1, [x4]
#else
    ldxr   w1, [x4]                   // Need to use atomic instructions for read barrier
#endif
    lsr    w2, w1, LOCK_WORD_STATE_SHIFT
    cbnz   w2, .Lslow_unlock          // if either of the top two bits are set, go slow path
    ldr    w2, [xSELF, #THREAD_ID_OFFSET]
    mov    x3, x1                     // copy lock word to check thread id equality
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
    eor    w3, w3, w2                 // lock_word.ThreadId() ^ self->ThreadId()
    uxth   w3, w3                     // zero top 16 bits
    cbnz   w3, .Lslow_unlock          // thread ids don't match -> go slow path
    mov    x3, x1                     // copy lock word to detect transition to unlocked
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
    cmp    w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
    bpl    .Lrecursive_thin_unlock
    // transition to unlocked
    mov    x3, x1
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK  // w3: zero except for the preserved read barrier bits
    dmb    ish                        // full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
    str    w3, [x4]
#else
    stxr   w2, w3, [x4]               // Need to use atomic instructions for read barrier
    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
#endif
    ret
.Lrecursive_thin_unlock:  // w1: original lock word
    sub    w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // decrement count
#ifndef USE_READ_BARRIER
    str    w1, [x4]
#else
    stxr   w2, w1, [x4]               // Need to use atomic instructions for read barrier
    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
#endif
    ret
.Lunlock_stxr_fail:
    b      .Lretry_unlock             // retry
.Lslow_unlock:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case exception allocation triggers GC
    mov    x1, xSELF                  // pass Thread::Current
    bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object

    /*
     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
     * artThrowClassCastException.
     */
    .extern artThrowClassCastException
ENTRY art_quick_check_cast
    // Store arguments and link register
    sub sp, sp, #32                     // Stack needs to be 16-byte aligned on calls
    .cfi_adjust_cfa_offset 32
    stp x0, x1, [sp]
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    stp xSELF, xLR, [sp, #16]
    .cfi_rel_offset x18, 16
    .cfi_rel_offset x30, 24

    // Call runtime code
    bl artIsAssignableFromCode

    // Check the result: zero means not assignable.
    cbz x0, .Lthrow_class_cast_exception

    // Restore and return
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp xSELF, xLR, [sp, #16]
    .cfi_restore x18
    .cfi_restore x30
    add sp, sp, #32
    .cfi_adjust_cfa_offset -32
    ret

.Lthrow_class_cast_exception:
    // Restore
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp xSELF, xLR, [sp, #16]
    .cfi_restore x18
    .cfi_restore x30
    add sp, sp, #32
    .cfi_adjust_cfa_offset -32

    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
    mov x2, xSELF                     // pass Thread::Current
    b artThrowClassCastException      // (Class*, Class*, Thread*)
    brk 0                             // We should not return here...
END art_quick_check_cast

    /*
     * Entry from managed code for array put operations of objects where the value being stored
     * needs to be checked for compatibility.
     * x0 = array, x1 = index, x2 = value
     *
     * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32-bit. We
     * assume, though, that the upper 32 bits are zeroed out. At least for x1/w1 we can do better by
     * using index-zero-extension in load/stores.
     *
     * Temporaries: x3, x4
     * TODO: x4 OK? ip seems wrong here.
     */
ENTRY art_quick_aput_obj_with_null_and_bound_check
    tst x0, x0
    bne art_quick_aput_obj_with_bound_check
    b art_quick_throw_null_pointer_exception
END art_quick_aput_obj_with_null_and_bound_check

ENTRY art_quick_aput_obj_with_bound_check
    ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]
    cmp w3, w1
    bhi art_quick_aput_obj
    mov x0, x1
    mov x1, x3
    b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check

ENTRY art_quick_aput_obj
    cbz x2, .Ldo_aput_null
    ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET]            // Heap reference = 32b
                                                         // This also zero-extends to x3
    ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET]            // Heap reference = 32b
                                                         // This also zero-extends to x4
    ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]    // Heap reference = 32b
                                                         // This also zero-extends to x3
    cmp w3, w4  // value's type == array's component type - trivial assignability
    bne .Lcheck_assignability
.Ldo_aput:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                                         // "Compress" = do nothing
    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
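    // Mark the GC card for the array: the card table base is cached in the
    // thread, the card index is the object address >> 7 (128-byte cards, as
    // the shift implies), and the base's low byte doubles as the dirty value
    // stored (a biased card table).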
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
.Ldo_aput_null:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                                         // "Compress" = do nothing
    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
    ret
.Lcheck_assignability:
    // Store arguments and link register
    sub sp, sp, #48                     // Stack needs to be 16-byte aligned on calls
    .cfi_adjust_cfa_offset 48
    stp x0, x1, [sp]
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    stp x2, xSELF, [sp, #16]
    .cfi_rel_offset x2, 16
    .cfi_rel_offset x18, 24
    str xLR, [sp, #32]
    .cfi_rel_offset x30, 32

    // Call runtime code
    mov x0, x3              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    mov x1, x4              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    bl artIsAssignableFromCode

    // Check the result: zero means not assignable.
    cbz x0, .Lthrow_array_store_exception

    // Restore
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp x2, xSELF, [sp, #16]
    .cfi_restore x2
    .cfi_restore x18
    ldr xLR, [sp, #32]
    .cfi_restore x30
    add sp, sp, #48
    .cfi_adjust_cfa_offset -48

    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                                          // "Compress" = do nothing
    str w2, [x3, x1, lsl #2]                              // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
.Lthrow_array_store_exception:
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp x2, xSELF, [sp, #16]
    .cfi_restore x2
    .cfi_restore x18
    ldr xLR, [sp, #32]
    .cfi_restore x30
    add sp, sp, #48
    .cfi_adjust_cfa_offset -48

    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov x1, x2                    // Pass value.
    mov x2, xSELF                 // Pass Thread::Current.
    b artThrowArrayStoreException // (Object*, Object*, Thread*).
    brk 0                         // Unreached.
END art_quick_aput_obj

// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x1, xSELF                  // pass Thread::Current
    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x2, xSELF                  // pass Thread::Current
    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x3, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x4, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
    DELIVER_PENDING_EXCEPTION
END \name
.endm

// Macros that take advantage of code similarities between downcalls with referrer.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
    mov    x2, xSELF                  // pass Thread::Current
    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*, SP)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    ldr    x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
    mov    x3, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    ldr    x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
    mov    x4, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    cbz w0, 1f                 // result zero branch over
    ret                        // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

    /*
     * Entry from managed code when static storage is uninitialized; this stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

// This is separated out as the argument order is different.
    .extern artSet64StaticFromCode
ENTRY art_quick_set64_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    mov    x3, x1                     // Stash the value.
    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
    mov    x2, x3                     // Put value param
    mov    x3, xSELF                  // pass Thread::Current
    bl     artSet64StaticFromCode
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static

    /*
     * Entry from managed code to resolve a string; this stub will allocate a String and deliver an
     * exception on error. On success the String is returned. w0 holds the string index. The
     * fast-path check for a hit in the strings cache has already been performed.
     */
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS

    /*
     * Called by managed code when the thread has been asked to suspend.
     */
    .extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
    ldrh   w0, [xSELF, #THREAD_FLAGS_OFFSET]  // get xSELF->state_and_flags.as_struct.flags
    cbnz   w0, .Lneed_suspend                 // branch if flags != 0
    ret                                       // return if flags == 0
.Lneed_suspend:
    mov    x0, xSELF
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME         // save callee saves for stack crawl
    bl     artTestSuspendFromCode             // (Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend

ENTRY art_quick_implicit_suspend
    mov    x0, xSELF
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME         // save callee saves for stack crawl
    bl     artTestSuspendFromCode             // (Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend

    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * x0 holds the proxy method and x1 holds the receiver. The frame size of the invoked proxy
     * method agrees with a ref and args callee save frame.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
    mov     x2, xSELF                   // pass Thread::Current
    mov     x3, sp                      // pass SP
    bl      artQuickProxyInvokeHandler  // (Method* proxy method, receiver, Thread*, SP)
    // Use xETR as xSELF might be scratched by native function above.
    ldr     x2, [xETR, #THREAD_EXCEPTION_OFFSET]
    cbnz    x2, .Lexception_in_proxy    // branch if an exception is pending
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
    fmov    d0, x0                      // Store result in d0 in case it was float or double
    ret                                 // return on success
.Lexception_in_proxy:
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler

    /*
     * Called to resolve an imt conflict. xIP1 is a hidden argument that holds the target method's
     * dex method index.
     */
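    // Note: xIP1 indexes the 64-bit entries of dex_cache_resolved_methods,
    // hence the "lsl 3" scaled load below.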
1468ENTRY art_quick_imt_conflict_trampoline
1469    ldr    x0, [sp, #0]                                // load caller Method*
1470    ldr    w0, [x0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET]  // load dex_cache_resolved_methods
1471    add    x0, x0, #MIRROR_LONG_ARRAY_DATA_OFFSET      // get starting address of data
1472    ldr    x0, [x0, xIP1, lsl 3]                       // load the target method
1473    b art_quick_invoke_interface_trampoline
1474END art_quick_imt_conflict_trampoline
1475
1476ENTRY art_quick_resolution_trampoline
1477    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1478    mov x2, xSELF
1479    mov x3, sp
1480    bl artQuickResolutionTrampoline  // (called, receiver, Thread*, SP)
1481    cbz x0, 1f
1482    mov xIP0, x0            // Remember returned code pointer in xIP0.
1483    ldr x0, [sp, #0]        // artQuickResolutionTrampoline puts called method in *SP.
1484    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1485    br xIP0
14861:
1487    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1488    DELIVER_PENDING_EXCEPTION
1489END art_quick_resolution_trampoline
1490
1491/*
1492 * Generic JNI frame layout:
1493 *
1494 * #-------------------#
1495 * |                   |
1496 * | caller method...  |
1497 * #-------------------#    <--- SP on entry
1498 * | Return X30/LR     |
1499 * | X29/FP            |    callee save
1500 * | X28               |    callee save
1501 * | X27               |    callee save
1502 * | X26               |    callee save
1503 * | X25               |    callee save
1504 * | X24               |    callee save
1505 * | X23               |    callee save
1506 * | X22               |    callee save
1507 * | X21               |    callee save
1508 * | X20               |    callee save
1509 * | X19               |    callee save
1510 * | X7                |    arg7
1511 * | X6                |    arg6
1512 * | X5                |    arg5
1513 * | X4                |    arg4
1514 * | X3                |    arg3
1515 * | X2                |    arg2
1516 * | X1                |    arg1
1517 * | D7                |    float arg 8
1518 * | D6                |    float arg 7
1519 * | D5                |    float arg 6
1520 * | D4                |    float arg 5
1521 * | D3                |    float arg 4
1522 * | D2                |    float arg 3
1523 * | D1                |    float arg 2
1524 * | D0                |    float arg 1
1525 * | Method*           | <- X0
1526 * #-------------------#
1527 * | local ref cookie  | // 4B
1528 * | handle scope size | // 4B
1529 * #-------------------#
1530 * | JNI Call Stack    |
1531 * #-------------------#    <--- SP on native call
1532 * |                   |
1533 * | Stack for Regs    |    The trampoline assembly will pop these values
1534 * |                   |    into registers for native call
1535 * #-------------------#
1536 * | Native code ptr   |
1537 * #-------------------#
1538 * | Free scratch      |
1539 * #-------------------#
1540 * | Ptr to (1)        |    <--- SP
1541 * #-------------------#
1542 */
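/*
 * Read from the Method* slot upward, the fixed top of the frame matches this
 * C++ layout sketch (names are illustrative; 28 slots x 8 B = 224 B per the
 * diagram above):
 *
 *   struct RefsAndArgsFrame {
 *     ArtMethod* method;          // <- X0, bottom of the managed frame.
 *     double d_args[8];           // D0..D7, float args 1-8.
 *     uint64_t x_args[7];         // X1..X7, args 1-7.
 *     uint64_t callee_saves[11];  // X19..X28 and X29/FP.
 *     uint64_t lr;                // X30/LR, return address.
 *   };
 */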
1543    /*
1544     * Called to do a generic JNI down-call
1545     */
1546ENTRY art_quick_generic_jni_trampoline
1547    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
1548
1549    // Save SP, so we can have static CFI info.
1550    mov x28, sp
1551    .cfi_def_cfa_register x28
1552
1553    // This looks the same, but is different: this will be updated to point to the bottom
1554    // of the frame when the handle scope is inserted.
1555    mov xFP, sp
1556
1557    mov xIP0, #5120  // Reserve scratch space for the handle scope and native call stack (see layout above).
1558    sub sp, sp, xIP0
1559
1560    // prepare for artQuickGenericJniTrampoline call
1561    // (Thread*,  SP)
1562    //    x0      x1   <= C calling convention
1563    //   xSELF    xFP  <= where they are
1564
1565    mov x0, xSELF   // Thread*
1566    mov x1, xFP
1567    bl artQuickGenericJniTrampoline  // (Thread*, sp)
1568
1569    // The C call will have registered the complete save-frame on success.
1570    // The result of the call is:
1571    // x0: pointer to native code, 0 on error.
1572    // x1: pointer to the bottom of the used area of the alloca; the stack can be restored up to there.
1573
1574    // Check for error = 0.
1575    cbz x0, .Lexception_in_native
1576
1577    // Release part of the alloca.
1578    mov sp, x1
1579
1580    // Save the code pointer
1581    mov xIP0, x0
1582
1583    // Load parameters from frame into registers.
1584    // TODO Check with artQuickGenericJniTrampoline.
1585    //      Also, check AAPCS64 again - the stack arguments are interleaved.
1586    ldp x0, x1, [sp]
1587    ldp x2, x3, [sp, #16]
1588    ldp x4, x5, [sp, #32]
1589    ldp x6, x7, [sp, #48]
1590
1591    ldp d0, d1, [sp, #64]
1592    ldp d2, d3, [sp, #80]
1593    ldp d4, d5, [sp, #96]
1594    ldp d6, d7, [sp, #112]
1595
1596    add sp, sp, #128
1597
1598    blr xIP0        // native call.
1599
1600    // result sign extension is handled in C code
1601    // prepare for artQuickGenericJniEndTrampoline call
1602    // (Thread*, result, result_f)
1603    //    x0       x1       x2        <= C calling convention
1604    mov x1, x0      // Pass the GPR result.
1605    mov x0, xETR    // Thread register, original xSELF might be scratched by native code.
1606    fmov x2, d0     // d0 will contain floating point result, but needs to go into x2
1607
1608    bl artQuickGenericJniEndTrampoline
1609
1610    // Pending exceptions possible.
1611    // Use xETR as xSELF might be scratched by native code
1612    ldr x2, [xETR, THREAD_EXCEPTION_OFFSET]
1613    cbnz x2, .Lexception_in_native
1614
1615    // Tear down the alloca.
1616    mov sp, x28
1617    .cfi_def_cfa_register sp
1618
1619    // Tear down the callee-save frame.
1620    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1621
1622    // Store the result in d0 as well, in case the return type is float or double.
1623    fmov d0, x0
1624    ret
1625
1626.Lexception_in_native:
1627    // Restore xSELF. It might have been scratched by native code.
1628    mov xSELF, xETR
1629    // Load into x1 first, then move to sp; sp is not a valid ldr destination.
1630    ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
1631    mov sp, x1
1632    .cfi_def_cfa_register sp
1633    // This will create a new save-all frame, required by the runtime.
1634    DELIVER_PENDING_EXCEPTION
1635END art_quick_generic_jni_trampoline
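
// Approximate C++ view of the runtime helpers used above (a sketch; the
// authoritative declarations live in the runtime):
//   // Builds the handle scope and native call stack; returns the native code
//   // pointer (x0, null on error) and the new SP (x1) as a two-word result.
//   artQuickGenericJniTrampoline(Thread* self, ArtMethod** managed_sp);
//   // Ends the JNI transition and produces the final return value, e.g.
//   // decoding a returned jobject reference.
//   uint64_t artQuickGenericJniEndTrampoline(Thread* self, uint64_t gpr_result,
//                                            uint64_t fpr_result);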
1636
1637/*
1638 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those
1639 * of a quick call:
1640 * x0 = method being called/to bridge to.
1641 * x1..x7, d0..d7 = arguments to that method.
1642 */
1643ENTRY art_quick_to_interpreter_bridge
1644    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME   // Set up frame and save arguments.
1645
1646    //  x0 will contain mirror::ArtMethod* method.
1647    mov x1, xSELF                          // Pass Thread::Current(), held in xSELF.
1648    mov x2, sp
1649
1650    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
1651    //                                      mirror::ArtMethod** sp)
1652    bl   artQuickToInterpreterBridge
1653
1654    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // TODO: no need to restore arguments in this case.
1655
1656    fmov d0, x0                            // Copy result to d0 in case the return type is float or double.
1657
1658    RETURN_OR_DELIVER_PENDING_EXCEPTION
1659END art_quick_to_interpreter_bridge
1660
1661
1662//
1663// Instrumentation-related stubs
1664//
1665    .extern artInstrumentationMethodEntryFromCode
1666ENTRY art_quick_instrumentation_entry
1667    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1668
1669    mov   x20, x0             // Preserve method reference in a callee-save.
1670
1671    mov   x2, xSELF
1672    mov   x3, xLR
1673    bl    artInstrumentationMethodEntryFromCode  // (Method*, Object*, Thread*, LR)
1674
1675    mov   xIP0, x0            // x0 = result of call.
1676    mov   x0, x20             // Reload method reference.
1677
1678    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // Note: will restore xSELF
1679    adr   xLR, art_quick_instrumentation_exit
1680    br    xIP0                // Tail-call method with lr set to art_quick_instrumentation_exit.
1681END art_quick_instrumentation_entry
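
    // Roughly, in C++ terms (a sketch): the runtime returns the code to invoke,
    // and we arrange for it to "return" into art_quick_instrumentation_exit:
    //   const void* code =
    //       artInstrumentationMethodEntryFromCode(method, receiver, self, lr);
    //   lr = art_quick_instrumentation_exit;
    //   goto *code;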
1682
1683    .extern artInstrumentationMethodExitFromCode
1684ENTRY art_quick_instrumentation_exit
1685    mov   xLR, #0             // Clobber LR for later checks.
1686
1687    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
1688
1689    // We need to save x0 and d0. We could use a callee-save from SETUP_REFS_ONLY, but then
1690    // we would need to fully restore it. As there are a lot of callee-save registers, it seems
1691    // easier to have an extra small stack area.
1692
1693    str x0, [sp, #-16]!       // Save integer result.
1694    .cfi_adjust_cfa_offset 16
1695    str d0,  [sp, #8]         // Save floating-point result.
1696
1697    add   x1, sp, #16         // Pass SP.
1698    mov   x2, x0              // Pass integer result.
1699    fmov  x3, d0              // Pass floating-point result.
1700    mov   x0, xSELF           // Pass Thread.
1701    bl   artInstrumentationMethodExitFromCode    // (Thread*, SP, gpr_res, fpr_res)
1702
1703    mov   xIP0, x0            // Return address from instrumentation call.
1704    mov   xLR, x1             // x1 holds the link register if we're to bounce to deoptimize
1705
1706    ldr   d0, [sp, #8]        // Restore floating-point result.
1707    ldr   x0, [sp], 16        // Restore integer result, and drop stack area.
1708    .cfi_adjust_cfa_offset -16
1709
1710    POP_REFS_ONLY_CALLEE_SAVE_FRAME
1711
1712    br    xIP0                // Tail-call out.
1713END art_quick_instrumentation_exit
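
    // Sketch of the exit protocol (the helper returns two words, per the
    // register moves above):
    //   pair = artInstrumentationMethodExitFromCode(self, sp, gpr_res, fpr_res);
    //   // pair.first  (x0): address to continue execution at.
    //   // pair.second (x1): LR to use if we bounce to the deoptimization entry.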
1714
1715    /*
1716     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
1717     * will long jump to the upcall with a special exception of -1.
1718     */
1719    .extern artDeoptimize
1720ENTRY art_quick_deoptimize
1721    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
1722    mov    x0, xSELF          // Pass thread.
1723    bl     artDeoptimize      // artDeoptimize(Thread*)
1724    brk 0                     // Unreached: artDeoptimize does not return.
1725END art_quick_deoptimize
1726
1727
1728    /*
1729     * String's indexOf.
1730     *
1731     * TODO: Not very optimized.
1732     * On entry:
1733     *    x0:   string object (known non-null)
1734     *    w1:   char to match (known <= 0xFFFF)
1735     *    w2:   Starting offset in string data
1736     */
1737ENTRY art_quick_indexof
1738    ldr   w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
1739    add   x0, x0, #MIRROR_STRING_VALUE_OFFSET
1740
1741    /* Clamp start to [0..count] */
1742    cmp   w2, #0
1743    csel  w2, wzr, w2, lt
1744    cmp   w2, w3
1745    csel  w2, w3, w2, gt
1746
1747    /* Save a copy to compute result */
1748    mov   x5, x0
1749
1750    /* Build pointer to start of data to compare and pre-bias */
1751    add   x0, x0, x2, lsl #1
1752    sub   x0, x0, #2
1753
1754    /* Compute iteration count */
1755    sub   w2, w3, w2
1756
1757    /*
1758     * At this point we have:
1759     *  x0: start of the data to test
1760     *  w1: char to compare
1761     *  w2: iteration count
1762     *  x5: original start of string data
1763     */
1764
1765    subs  w2, w2, #4
1766    b.lt  .Lindexof_remainder
1767
1768.Lindexof_loop4:
1769    ldrh  w6, [x0, #2]!
1770    ldrh  w7, [x0, #2]!
1771    ldrh  wIP0, [x0, #2]!
1772    ldrh  wIP1, [x0, #2]!
1773    cmp   w6, w1
1774    b.eq  .Lmatch_0
1775    cmp   w7, w1
1776    b.eq  .Lmatch_1
1777    cmp   wIP0, w1
1778    b.eq  .Lmatch_2
1779    cmp   wIP1, w1
1780    b.eq  .Lmatch_3
1781    subs  w2, w2, #4
1782    b.ge  .Lindexof_loop4
1783
1784.Lindexof_remainder:
1785    adds  w2, w2, #4
1786    b.eq  .Lindexof_nomatch
1787
1788.Lindexof_loop1:
1789    ldrh  w6, [x0, #2]!
1790    cmp   w6, w1
1791    b.eq  .Lmatch_3
1792    subs  w2, w2, #1
1793    b.ne  .Lindexof_loop1
1794
1795.Lindexof_nomatch:
1796    mov   x0, #-1
1797    ret
1798
1799.Lmatch_0:
1800    sub   x0, x0, #6
1801    sub   x0, x0, x5
1802    asr   x0, x0, #1
1803    ret
1804.Lmatch_1:
1805    sub   x0, x0, #4
1806    sub   x0, x0, x5
1807    asr   x0, x0, #1
1808    ret
1809.Lmatch_2:
1810    sub   x0, x0, #2
1811    sub   x0, x0, x5
1812    asr   x0, x0, #1
1813    ret
1814.Lmatch_3:
1815    sub   x0, x0, x5
1816    asr   x0, x0, #1
1817    ret
1818END art_quick_indexof
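
    // Reference C++ sketch of the routine above (illustrative; data/count mirror
    // the String fields loaded at entry):
    //   int32_t IndexOf(const uint16_t* data, int32_t count, uint16_t ch, int32_t start) {
    //     if (start < 0) start = 0;
    //     if (start > count) start = count;
    //     for (int32_t i = start; i < count; ++i) {
    //       if (data[i] == ch) return i;
    //     }
    //     return -1;
    //   }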
1819
1820    /*
1821     * String's compareTo.
1822     *
1823     * TODO: Not very optimized.
1824     *
1825     * On entry:
1826     *    x0:   this object pointer
1827     *    x1:   comp object pointer
1828     *
1829     */
1830    .extern __memcmp16
1831ENTRY art_quick_string_compareto
1832    mov    x2, x0         // x0 is return, use x2 for first input.
1833    sub    x0, x2, x1     // Same string object?
1834    cbnz   x0, 1f
1835    ret
18361:                        // Different string objects.
1837
1838    ldr    w4, [x2, #MIRROR_STRING_COUNT_OFFSET]
1839    ldr    w3, [x1, #MIRROR_STRING_COUNT_OFFSET]
1840    add    x2, x2, #MIRROR_STRING_VALUE_OFFSET
1841    add    x1, x1, #MIRROR_STRING_VALUE_OFFSET
1842
1843    /*
1844     * Now:           Data*  Count
1845     *    first arg    x2      w4
1846     *   second arg    x1      w3
1847     */
1848
1849    // x0 := str1.length(w4) - str2.length(w3). The 32-bit ldrs above zero-extended w3/w4 into x3/x4.
1850    subs x0, x4, x3
1851    // Min(count1, count2) into x3.
1852    csel x3, x3, x4, ge
1853
1854    // TODO: Tune this value.
1855    // Check for long string, do memcmp16 for them.
1856    cmp w3, #28  // Constant from arm32.
1857    b.gt .Ldo_memcmp16
1858
1859    /*
1860     * Now:
1861     *   x2: *first string data
1862     *   x1: *second string data
1863     *   w3: iteration count
1864     *   x0: return value if comparison equal
1865     *   x4, x5, x6, x7: free
1866     */
1867
1868    // Do a simple unrolled loop.
1869.Lloop:
1870    // At least two more elements?
1871    subs w3, w3, #2
1872    b.lt .Lremainder_or_done
1873
1874    ldrh w4, [x2], #2
1875    ldrh w5, [x1], #2
1876
1877    ldrh w6, [x2], #2
1878    ldrh w7, [x1], #2
1879
1880    subs w4, w4, w5
1881    b.ne .Lw4_result
1882
1883    subs w6, w6, w7
1884    b.ne .Lw6_result
1885
1886    b .Lloop
1887
1888.Lremainder_or_done:
1889    adds w3, w3, #1
1890    b.eq .Lremainder
1891    ret
1892
1893.Lremainder:
1894    ldrh w4, [x2], #2
1895    ldrh w5, [x1], #2
1896    subs w4, w4, w5
1897    b.ne .Lw4_result
1898    ret
1899
1900// Result is in w4
1901.Lw4_result:
1902    sxtw x0, w4
1903    ret
1904
1905// Result is in w6
1906.Lw6_result:
1907    sxtw x0, w6
1908    ret
1909
1910.Ldo_memcmp16:
1911    mov x14, x0                  // Save x0 and LR. __memcmp16 does not use these temps.
1912    mov x15, xLR                 //                 TODO: Codify and check that?
1913
1914    mov x0, x2
1915    uxtw x2, w3
1916    bl __memcmp16
1917
1918    mov xLR, x15                 // Restore LR.
1919
1920    cmp x0, #0                   // Check the memcmp difference.
1921    csel x0, x0, x14, ne         // x0 := (x0 != 0) ? x0 (memcmp16 result) : x14 (length diff).
1922    ret
1923END art_quick_string_compareto
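
    // Reference C++ sketch of the comparison above (illustrative only):
    //   int32_t CompareTo(const uint16_t* s1, int32_t n1, const uint16_t* s2, int32_t n2) {
    //     int32_t min = (n1 < n2) ? n1 : n2;
    //     for (int32_t i = 0; i < min; ++i) {
    //       int32_t diff = static_cast<int32_t>(s1[i]) - static_cast<int32_t>(s2[i]);
    //       if (diff != 0) return diff;  // First differing char decides.
    //     }
    //     return n1 - n2;                // Otherwise the length difference does.
    //   }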
1924
1925// Macro to facilitate adding new entrypoints which call native functions directly.
1926// Currently, xSELF is the only thing we need to take care of between managed code and AAPCS.
1927// But we might introduce more differences.
1928.macro NATIVE_DOWNCALL name, entrypoint
1929    .extern \entrypoint
1930ENTRY \name
1931    stp    xSELF, xLR, [sp, #-16]!
1932    bl     \entrypoint
1933    ldp    xSELF, xLR, [sp], #16
1934    ret
1935END \name
1936.endm
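
// For example, `NATIVE_DOWNCALL art_quick_fmod fmod` below expands to:
//   ENTRY art_quick_fmod
//       stp    xSELF, xLR, [sp, #-16]!
//       bl     fmod
//       ldp    xSELF, xLR, [sp], #16
//       ret
//   END art_quick_fmod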
1937
1938NATIVE_DOWNCALL art_quick_fmod fmod
1939NATIVE_DOWNCALL art_quick_fmodf fmodf
1940NATIVE_DOWNCALL art_quick_memcpy memcpy
1941NATIVE_DOWNCALL art_quick_assignable_from_code artIsAssignableFromCode
1942