quick_entrypoints_arm64.S revision 4359e61927866c254bc2d701e3ea4c48de10b79c
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_arm64.S"

#include "arch/quick_alloc_entrypoints.S"


    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveAll).
     */
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_ (Runtime*).

    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kSaveAll].
    // Loads appropriate callee-save-method.
    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET]

    sub sp, sp, #176
    .cfi_adjust_cfa_offset 176

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // Stack alignment filler [sp, #8].
    // FP callee-saves.
    stp d8, d9,   [sp, #16]
    stp d10, d11, [sp, #32]
    stp d12, d13, [sp, #48]
    stp d14, d15, [sp, #64]

    // GP callee-saves.
    stp x19, x20, [sp, #80]
    .cfi_rel_offset x19, 80
    .cfi_rel_offset x20, 88

    stp x21, x22, [sp, #96]
    .cfi_rel_offset x21, 96
    .cfi_rel_offset x22, 104

    stp x23, x24, [sp, #112]
    .cfi_rel_offset x23, 112
    .cfi_rel_offset x24, 120

    stp x25, x26, [sp, #128]
    .cfi_rel_offset x25, 128
    .cfi_rel_offset x26, 136

    stp x27, x28, [sp, #144]
    .cfi_rel_offset x27, 144
    .cfi_rel_offset x28, 152

    stp x29, xLR, [sp, #160]
    .cfi_rel_offset x29, 160
    .cfi_rel_offset x30, 168

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAll].
    str xIP0, [sp]
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
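
// For reference, the resulting kSaveAll frame layout, derived from the stores
// above (offsets in bytes from SP):
//   [sp, #0]    ArtMethod*  (the kSaveAll callee-save method)
//   [sp, #8]    stack alignment filler
//   [sp, #16]   d8-d15      (FP callee-saves, four pairs)
//   [sp, #80]   x19-x28     (GP callee-saves, five pairs)
//   [sp, #160]  x29, x30    (FP and LR)
// Total: 176 bytes = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE.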

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsOnly).
     */
.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_ (Runtime*).

    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsOnly].
    // Loads appropriate callee-save-method.
    ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET]

    sub sp, sp, #96
    .cfi_adjust_cfa_offset 96

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // GP callee-saves.
    // x20 is paired with the ArtMethod* - see below.
    stp x21, x22, [sp, #16]
    .cfi_rel_offset x21, 16
    .cfi_rel_offset x22, 24

    stp x23, x24, [sp, #32]
    .cfi_rel_offset x23, 32
    .cfi_rel_offset x24, 40

    stp x25, x26, [sp, #48]
    .cfi_rel_offset x25, 48
    .cfi_rel_offset x26, 56

    stp x27, x28, [sp, #64]
    .cfi_rel_offset x27, 64
    .cfi_rel_offset x28, 72

    stp x29, xLR, [sp, #80]
    .cfi_rel_offset x29, 80
    .cfi_rel_offset x30, 88

    // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly].
    stp xIP0, x20, [sp]
    .cfi_rel_offset x20, 8

    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    // Callee-saves.
    ldr x20, [sp, #8]
    .cfi_restore x20

    ldp x21, x22, [sp, #16]
    .cfi_restore x21
    .cfi_restore x22

    ldp x23, x24, [sp, #32]
    .cfi_restore x23
    .cfi_restore x24

    ldp x25, x26, [sp, #48]
    .cfi_restore x25
    .cfi_restore x26

    ldp x27, x28, [sp, #64]
    .cfi_restore x27
    .cfi_restore x28

    ldp x29, xLR, [sp, #80]
    .cfi_restore x29
    .cfi_restore x30

    add sp, sp, #96
    .cfi_adjust_cfa_offset -96
.endm

.macro POP_REFS_ONLY_CALLEE_SAVE_FRAME
    add sp, sp, #96
    .cfi_adjust_cfa_offset -96
.endm

.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    ret
.endm


.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    sub sp, sp, #224
    .cfi_adjust_cfa_offset 224

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // Stack alignment filler [sp, #8].
    // FP args.
    stp d0, d1, [sp, #16]
    stp d2, d3, [sp, #32]
    stp d4, d5, [sp, #48]
    stp d6, d7, [sp, #64]

    // Core args.
    stp x1, x2, [sp, #80]
    .cfi_rel_offset x1, 80
    .cfi_rel_offset x2, 88

    stp x3, x4, [sp, #96]
    .cfi_rel_offset x3, 96
    .cfi_rel_offset x4, 104

    stp x5, x6, [sp, #112]
    .cfi_rel_offset x5, 112
    .cfi_rel_offset x6, 120

    // x7, callee-saves.
    stp x7, x20, [sp, #128]
    .cfi_rel_offset x7, 128
    .cfi_rel_offset x20, 136

    stp x21, x22, [sp, #144]
    .cfi_rel_offset x21, 144
    .cfi_rel_offset x22, 152

    stp x23, x24, [sp, #160]
    .cfi_rel_offset x23, 160
    .cfi_rel_offset x24, 168

    stp x25, x26, [sp, #176]
    .cfi_rel_offset x25, 176
    .cfi_rel_offset x26, 184

    stp x27, x28, [sp, #192]
    .cfi_rel_offset x27, 192
    .cfi_rel_offset x28, 200

    // x29 (callee-save) and LR.
    stp x29, xLR, [sp, #208]
    .cfi_rel_offset x29, 208
    .cfi_rel_offset x30, 216

.endm
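
// Resulting kRefsAndArgs frame layout, derived from the stores above (the
// ArtMethod* slot itself is filled in by the callers of this macro):
//   [sp, #0]    ArtMethod*
//   [sp, #8]    stack alignment filler
//   [sp, #16]   d0-d7     (FP argument registers, four pairs)
//   [sp, #80]   x1-x7     (core arguments), then x20-x28 (callee-saves)
//   [sp, #208]  x29, x30  (FP and LR)
// Total: 224 bytes = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE.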

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
     *
     * TODO This is probably too conservative - saving FP & LR.
     */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_ (Runtime*).

    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsAndArgs].
    ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]

    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL

    str xIP0, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs].
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    str x0, [sp, #0]  // Store ArtMethod* to bottom of stack.
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    // FP args.
    ldp d0, d1, [sp, #16]
    ldp d2, d3, [sp, #32]
    ldp d4, d5, [sp, #48]
    ldp d6, d7, [sp, #64]

    // Core args.
    ldp x1, x2, [sp, #80]
    .cfi_restore x1
    .cfi_restore x2

    ldp x3, x4, [sp, #96]
    .cfi_restore x3
    .cfi_restore x4

    ldp x5, x6, [sp, #112]
    .cfi_restore x5
    .cfi_restore x6

    // x7, callee-saves.
    ldp x7, x20, [sp, #128]
    .cfi_restore x7
    .cfi_restore x20

    ldp x21, x22, [sp, #144]
    .cfi_restore x21
    .cfi_restore x22

    ldp x23, x24, [sp, #160]
    .cfi_restore x23
    .cfi_restore x24

    ldp x25, x26, [sp, #176]
    .cfi_restore x25
    .cfi_restore x26

    ldp x27, x28, [sp, #192]
    .cfi_restore x27
    .cfi_restore x28

    // x29 (callee-save) and LR.
    ldp x29, xLR, [sp, #208]
    .cfi_restore x29
    .cfi_restore x30

    add sp, sp, #224
    .cfi_adjust_cfa_offset -224
.endm

.macro RETURN_IF_RESULT_IS_ZERO
    cbnz x0, 1f                // result non-zero branch over
    ret                        // return
1:
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO
    cbz x0, 1f                 // result zero branch over
    ret                        // return
1:
.endm

    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_.
     */
.macro DELIVER_PENDING_EXCEPTION
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov x0, xSELF

    // Point of no return.
    b artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*)
    brk 0  // Unreached
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
    ldr \reg, [xSELF, #THREAD_EXCEPTION_OFFSET]   // Get exception field.
    cbnz \reg, 1f
    ret
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm

// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
.endm

.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
    cbnz w0, 1f                // result non-zero branch over
    ret                        // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
    mov x0, xSELF                     // pass Thread::Current
    b   \cxx_name                     // \cxx_name(Thread*)
END \c_name
.endm

.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context.
    mov x1, xSELF                     // pass Thread::Current.
    b   \cxx_name                     // \cxx_name(arg, Thread*).
    brk 0
END \c_name
.endm

.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
    mov x2, xSELF                     // pass Thread::Current
    b   \cxx_name                     // \cxx_name(arg1, arg2, Thread*)
    brk 0
END \c_name
.endm

    /*
     * Called by managed code, saves callee saves and then calls artThrowException
     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode

    /*
     * Called by managed code to create and deliver a NullPointerException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode

    /*
     * Call installed by a signal handler to create and deliver a NullPointerException.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal

    /*
     * Called by managed code to create and deliver an ArithmeticException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
     * index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode

    /*
     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
     * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode

    /*
     * Called by managed code to create and deliver a NoSuchMethodError.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
     * the method_idx.  This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/x1.
     *
     * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
     * of the target Method* in x0 and method->code_ in x1.
     *
     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     *
     * Adapted from ARM32 code.
     *
     * Clobbers xIP0.
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // save callee saves in case allocation triggers GC
    // The helper signature is always (method_idx, this_object, Thread*, SP).

    mov    x2, xSELF                      // pass Thread::Current
    mov    x3, sp
    bl     \cxx_name                      // (method_idx, this_object, Thread*, SP)
    mov    xIP0, x1                       // save Method*->code_
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    cbz    x0, 1f                         // did we find the target? if not go to exception delivery
    br     xIP0                           // tail call to target
1:
    DELIVER_PENDING_EXCEPTION
.endm
.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm
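
// For reference, a sketch of the C++ helper declaration the register setup
// above implies (inferred from this code, not an authoritative declaration;
// the two-word return carries the Method* in x0 and its code entrypoint in x1):
//   extern "C" TwoWordReturn artInvokeXxxTrampolineWithAccessCheck(
//       uint32_t method_idx, mirror::Object* this_object,
//       Thread* self, ArtMethod** sp);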

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck


.macro INVOKE_STUB_CREATE_FRAME

SAVE_SIZE=15*8   // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+8


    mov x9, sp                             // Save stack pointer.
    .cfi_register sp, x9

    add x10, x2, # SAVE_SIZE_AND_METHOD    // Calculate size of frame.
    sub x10, sp, x10                       // Calculate SP position - saves + ArtMethod* + args.
    and x10, x10, # ~0xf                   // Enforce 16 byte stack alignment.
    mov sp, x10                            // Set new SP.

    sub x10, x9, #SAVE_SIZE                // Calculate new FP (later). Done here as we must move SP
    .cfi_def_cfa_register x10              // before this.
    .cfi_adjust_cfa_offset SAVE_SIZE

    str x28, [x10, #112]
    .cfi_rel_offset x28, 112

    stp x26, x27, [x10, #96]
    .cfi_rel_offset x26, 96
    .cfi_rel_offset x27, 104

    stp x24, x25, [x10, #80]
    .cfi_rel_offset x24, 80
    .cfi_rel_offset x25, 88

    stp x22, x23, [x10, #64]
    .cfi_rel_offset x22, 64
    .cfi_rel_offset x23, 72

    stp x20, x21, [x10, #48]
    .cfi_rel_offset x20, 48
    .cfi_rel_offset x21, 56

    stp x9, x19, [x10, #32]                // Save old stack pointer and x19.
    .cfi_rel_offset sp, 32
    .cfi_rel_offset x19, 40

    stp x4, x5, [x10, #16]                 // Save result and shorty addresses.
    .cfi_rel_offset x4, 16
    .cfi_rel_offset x5, 24

    stp xFP, xLR, [x10]                    // Store LR & FP.
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8

    mov xFP, x10                           // Use xFP now, as it's callee-saved.
    .cfi_def_cfa_register x29
    mov xSELF, x3                          // Move thread pointer into SELF register.

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X1 - source address
    // W2 - args length
    // X9 - destination address.
    // W10 - temporary
    add x9, sp, #8                         // Destination address is bottom of stack + space for the
                                           // null ArtMethod* slot.

    // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler
    // does not have unique-id variables.
1:
    cmp w2, #0
    beq 2f
    sub w2, w2, #4      // Need 65536 bytes of range.
    ldr w10, [x1, x2]
    str w10, [x9, x2]

    b 1b

2:
    // Store null into ArtMethod* at bottom of frame.
    str xzr, [sp]
.endm

.macro INVOKE_STUB_CALL_AND_RETURN

    // Load the method's quick code entry point.
    ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    // Branch to method.
    blr x9

    // Restore return value address and shorty address.
    ldp x4, x5, [xFP, #16]
    .cfi_restore x4
    .cfi_restore x5

    ldr x28, [xFP, #112]
    .cfi_restore x28

    ldp x26, x27, [xFP, #96]
    .cfi_restore x26
    .cfi_restore x27

    ldp x24, x25, [xFP, #80]
    .cfi_restore x24
    .cfi_restore x25

    ldp x22, x23, [xFP, #64]
    .cfi_restore x22
    .cfi_restore x23

    ldp x20, x21, [xFP, #48]
    .cfi_restore x20
    .cfi_restore x21

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x5]

    // Check the return type and store the correct register into the jvalue in memory.
    // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables.

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq 3f

    // Is it a double?
    cmp w10, #'D'
    bne 1f
    str d0, [x4]
    b 3f

1:  // Is it a float?
    cmp w10, #'F'
    bne 2f
    str s0, [x4]
    b 3f

2:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x4]

3:  // Finish up.
    ldp x2, x19, [xFP, #32]   // Restore stack pointer and x19.
    .cfi_restore x19
    mov sp, x2
    .cfi_restore sp

    ldp xFP, xLR, [xFP]    // Restore old frame pointer and link register.
    .cfi_restore x29
    .cfi_restore x30

    ret

.endm
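
// Shorty reference for the return-type dispatch above (standard dex shorty
// characters, listed here for convenience): 'V' void, 'Z' boolean, 'B' byte,
// 'C' char, 'S' short, 'I' int, 'J' long, 'F' float, 'D' double, 'L' reference.
// Only 'V', 'D' and 'F' need special cases; every other type is returned in
// w0/x0 and a single 64-bit store covers it.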


/*
 *  extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 *  +----------------------+
 *  |                      |
 *  |  C/C++ frame         |
 *  |       LR''           |
 *  |       FP''           | <- SP'
 *  +----------------------+
 *  +----------------------+
 *  |        x28           | <- TODO: Remove callee-saves.
 *  |         :            |
 *  |        x19           |
 *  |        SP'           |
 *  |        X5            |
 *  |        X4            |        Saved registers
 *  |        LR'           |
 *  |        FP'           | <- FP
 *  +----------------------+
 *  | uint32_t out[n-1]    |
 *  |    :      :          |        Outs
 *  | uint32_t out[0]      |
 *  | ArtMethod*           | <- SP  value=null
 *  +----------------------+
 *
 * Outgoing registers:
 *  x0    - Method*
 *  x1-x7 - integer parameters.
 *  d0-d7 - Floating point parameters.
 *  xSELF = self
 *  SP = & of ArtMethod*
 *  x1 = "this" pointer.
 *
 */
ENTRY art_quick_invoke_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr  x11, .LstoreW2
    adr  x12, .LstoreX2
    adr  x13, .LstoreS0
    adr  x14, .LstoreD0

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1         // Load shorty address, plus one to skip return value.
    ldr w1, [x9], #4        // Load "this" parameter, and increment arg pointer.

    // Loop to fill registers.
.LfillRegisters:
    ldrb w17, [x10], #1       // Load next character in signature, and increment.
    cbz w17, .LcallFunction   // Exit at end of signature. Shorty 0 terminated.

    cmp  w17, #'F' // is this a float?
    bne .LisDouble

    cmp x15, # 8*12         // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x13, x15       // Calculate subroutine to jump to.
    br  x17

.LisDouble:
    cmp w17, #'D'           // is this a double?
    bne .LisLong

    cmp x15, # 8*12         // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x14, x15       // Calculate subroutine to jump to.
    br x17

.LisLong:
    cmp w17, #'J'           // is this a long?
    bne .LisOther

    cmp x8, # 6*12          // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x12, x8        // Calculate subroutine to jump to.
    br x17

.LisOther:                  // Everything else takes one vReg.
    cmp x8, # 6*12          // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x11, x8        // Calculate subroutine to jump to.
    br x17

.Ladvance4:
    add x9, x9, #4
    b .LfillRegisters

.Ladvance8:
    add x9, x9, #8
    b .LfillRegisters

// Macro for loading a parameter into a register.
//  counter - the register with offset into these tables.
//  size - the size of the register - 4 or 8 bytes.
//  register - the name of the register to be loaded.
.macro LOADREG counter size register return
    ldr \register , [x9], #\size
    add \counter, \counter, 12
    b \return
.endm
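
// Each LOADREG expansion above is exactly three 4-byte instructions (ldr, add,
// b), i.e. 12 bytes of code, so the x8/x15 counters double as byte offsets
// into the dispatch tables below: bumping a counter by 12 selects the next
// register's loader, and the "# 6*12"/"# 8*12" comparisons in the fill loop
// detect when all argument registers of that class are already full.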

// Store ints.
.LstoreW2:
    LOADREG x8 4 w2 .LfillRegisters
    LOADREG x8 4 w3 .LfillRegisters
    LOADREG x8 4 w4 .LfillRegisters
    LOADREG x8 4 w5 .LfillRegisters
    LOADREG x8 4 w6 .LfillRegisters
    LOADREG x8 4 w7 .LfillRegisters

// Store longs.
.LstoreX2:
    LOADREG x8 8 x2 .LfillRegisters
    LOADREG x8 8 x3 .LfillRegisters
    LOADREG x8 8 x4 .LfillRegisters
    LOADREG x8 8 x5 .LfillRegisters
    LOADREG x8 8 x6 .LfillRegisters
    LOADREG x8 8 x7 .LfillRegisters

// Store singles.
.LstoreS0:
    LOADREG x15 4 s0 .LfillRegisters
    LOADREG x15 4 s1 .LfillRegisters
    LOADREG x15 4 s2 .LfillRegisters
    LOADREG x15 4 s3 .LfillRegisters
    LOADREG x15 4 s4 .LfillRegisters
    LOADREG x15 4 s5 .LfillRegisters
    LOADREG x15 4 s6 .LfillRegisters
    LOADREG x15 4 s7 .LfillRegisters

// Store doubles.
.LstoreD0:
    LOADREG x15 8 d0 .LfillRegisters
    LOADREG x15 8 d1 .LfillRegisters
    LOADREG x15 8 d2 .LfillRegisters
    LOADREG x15 8 d3 .LfillRegisters
    LOADREG x15 8 d4 .LfillRegisters
    LOADREG x15 8 d5 .LfillRegisters
    LOADREG x15 8 d6 .LfillRegisters
    LOADREG x15 8 d7 .LfillRegisters


.LcallFunction:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_stub

/*  extern"C"
 *     void art_quick_invoke_static_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 */
ENTRY art_quick_invoke_static_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr  x11, .LstoreW1_2
    adr  x12, .LstoreX1_2
    adr  x13, .LstoreS0_2
    adr  x14, .LstoreD0_2

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1     // Load shorty address, plus one to skip return value.

    // Loop to fill registers.
.LfillRegisters2:
    ldrb w17, [x10], #1         // Load next character in signature, and increment.
    cbz w17, .LcallFunction2    // Exit at end of signature. Shorty 0 terminated.

    cmp  w17, #'F'          // is this a float?
    bne .LisDouble2

    cmp x15, # 8*12         // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x13, x15       // Calculate subroutine to jump to.
    br  x17

.LisDouble2:
    cmp w17, #'D'           // is this a double?
    bne .LisLong2

    cmp x15, # 8*12         // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x14, x15       // Calculate subroutine to jump to.
    br x17

.LisLong2:
    cmp w17, #'J'           // is this a long?
    bne .LisOther2

    cmp x8, # 7*12          // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x12, x8        // Calculate subroutine to jump to.
    br x17

.LisOther2:                 // Everything else takes one vReg.
    cmp x8, # 7*12          // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x11, x8        // Calculate subroutine to jump to.
    br x17

.Ladvance4_2:
    add x9, x9, #4
    b .LfillRegisters2

.Ladvance8_2:
    add x9, x9, #8
    b .LfillRegisters2

// Store ints.
.LstoreW1_2:
    LOADREG x8 4 w1 .LfillRegisters2
    LOADREG x8 4 w2 .LfillRegisters2
    LOADREG x8 4 w3 .LfillRegisters2
    LOADREG x8 4 w4 .LfillRegisters2
    LOADREG x8 4 w5 .LfillRegisters2
    LOADREG x8 4 w6 .LfillRegisters2
    LOADREG x8 4 w7 .LfillRegisters2

// Store longs.
.LstoreX1_2:
    LOADREG x8 8 x1 .LfillRegisters2
    LOADREG x8 8 x2 .LfillRegisters2
    LOADREG x8 8 x3 .LfillRegisters2
    LOADREG x8 8 x4 .LfillRegisters2
    LOADREG x8 8 x5 .LfillRegisters2
    LOADREG x8 8 x6 .LfillRegisters2
    LOADREG x8 8 x7 .LfillRegisters2

// Store singles.
.LstoreS0_2:
    LOADREG x15 4 s0 .LfillRegisters2
    LOADREG x15 4 s1 .LfillRegisters2
    LOADREG x15 4 s2 .LfillRegisters2
    LOADREG x15 4 s3 .LfillRegisters2
    LOADREG x15 4 s4 .LfillRegisters2
    LOADREG x15 4 s5 .LfillRegisters2
    LOADREG x15 4 s6 .LfillRegisters2
    LOADREG x15 4 s7 .LfillRegisters2

// Store doubles.
.LstoreD0_2:
    LOADREG x15 8 d0 .LfillRegisters2
    LOADREG x15 8 d1 .LfillRegisters2
    LOADREG x15 8 d2 .LfillRegisters2
    LOADREG x15 8 d3 .LfillRegisters2
    LOADREG x15 8 d4 .LfillRegisters2
    LOADREG x15 8 d5 .LfillRegisters2
    LOADREG x15 8 d6 .LfillRegisters2
    LOADREG x15 8 d7 .LfillRegisters2


.LcallFunction2:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_static_stub
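
// This static stub is identical to art_quick_invoke_stub above except that it
// does not preload "this" into w1, so integer arguments start at w1/x1 (seven
// GP argument registers instead of six) - hence the "# 7*12" caps in its fill
// loop where the instance stub uses "# 6*12".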



/*  extern"C" void art_quick_osr_stub(void** stack,                x0
 *                                    size_t stack_size_in_bytes,  x1
 *                                    const uint8_t* native_pc,    x2
 *                                    JValue *result,              x3
 *                                    char   *shorty,              x4
 *                                    Thread *self)                x5
 */
ENTRY art_quick_osr_stub
SAVE_SIZE=15*8   // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
    mov x9, sp                             // Save stack pointer.
    .cfi_register sp, x9

    sub x10, sp, # SAVE_SIZE
    and x10, x10, # ~0xf                   // Enforce 16 byte stack alignment.
    mov sp, x10                            // Set new SP.

    str x28, [sp, #112]
    stp x26, x27, [sp, #96]
    stp x24, x25, [sp, #80]
    stp x22, x23, [sp, #64]
    stp x20, x21, [sp, #48]
    stp x9, x19, [sp, #32]                // Save old stack pointer and x19.
    stp x3, x4, [sp, #16]                 // Save result and shorty addresses.
    stp xFP, xLR, [sp]                    // Store LR & FP.
    mov xSELF, x5                         // Move thread pointer into SELF register.

    sub sp, sp, #16
    str xzr, [sp]                         // Store null for ArtMethod* slot.
    // Branch to stub.
    bl .Losr_entry
    add sp, sp, #16

    // Restore return value address and shorty address.
    ldp x3, x4, [sp, #16]
    ldr x28, [sp, #112]
    ldp x26, x27, [sp, #96]
    ldp x24, x25, [sp, #80]
    ldp x22, x23, [sp, #64]
    ldp x20, x21, [sp, #48]

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x4]

    // Check the return type and store the correct register into the jvalue in memory.

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq .Losr_exit

    // Is it a double?
    cmp w10, #'D'
    bne .Lno_double
    str d0, [x3]
    b .Losr_exit

.Lno_double:  // Is it a float?
    cmp w10, #'F'
    bne .Lno_float
    str s0, [x3]
    b .Losr_exit

.Lno_float:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x3]

.Losr_exit:  // Finish up.
    ldp x2, x19, [sp, #32]   // Restore stack pointer and x19.
    ldp xFP, xLR, [sp]    // Restore old frame pointer and link register.
    mov sp, x2
    ret

.Losr_entry:
    // Update stack pointer for the callee.
    sub sp, sp, x1

    // Update link register slot expected by the callee.
    sub w1, w1, #8
    str lr, [sp, x1]

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X0 - source address
    // W1 - args length
    // SP - destination address.
    // W10 - temporary
.Losr_loop_entry:
    cmp w1, #0
    beq .Losr_loop_exit
    sub w1, w1, #4
    ldr w10, [x0, x1]
    str w10, [sp, x1]
    b .Losr_loop_entry

.Losr_loop_exit:
    // Branch to the OSR entry point.
    br x2

END art_quick_osr_stub
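
// Note on the control flow above: "bl .Losr_entry" makes the following
// "add sp, sp, #16" the return address, and .Losr_entry stores that LR into
// the slot the OSR frame expects. When the OSR-compiled code eventually
// returns, it therefore lands back in this stub, which unwinds the saved
// registers and stores the result.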

    /*
     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_.
     */

ENTRY art_quick_do_long_jump
    // Load FPRs
    ldp d0, d1, [x1], #16
    ldp d2, d3, [x1], #16
    ldp d4, d5, [x1], #16
    ldp d6, d7, [x1], #16
    ldp d8, d9, [x1], #16
    ldp d10, d11, [x1], #16
    ldp d12, d13, [x1], #16
    ldp d14, d15, [x1], #16
    ldp d16, d17, [x1], #16
    ldp d18, d19, [x1], #16
    ldp d20, d21, [x1], #16
    ldp d22, d23, [x1], #16
    ldp d24, d25, [x1], #16
    ldp d26, d27, [x1], #16
    ldp d28, d29, [x1], #16
    ldp d30, d31, [x1]

    // Load GPRs
    // TODO: lots of those are smashed, could optimize.
    add x0, x0, #30*8
    ldp x30, x1, [x0], #-16          // LR & SP
    ldp x28, x29, [x0], #-16
    ldp x26, x27, [x0], #-16
    ldp x24, x25, [x0], #-16
    ldp x22, x23, [x0], #-16
    ldp x20, x21, [x0], #-16
    ldp x18, x19, [x0], #-16
    ldp x16, x17, [x0], #-16
    ldp x14, x15, [x0], #-16
    ldp x12, x13, [x0], #-16
    ldp x10, x11, [x0], #-16
    ldp x8, x9, [x0], #-16
    ldp x6, x7, [x0], #-16
    ldp x4, x5, [x0], #-16
    ldp x2, x3, [x0], #-16
    mov sp, x1

    // Need to load PC, it's at the end (after the space for the unused XZR). Use x1.
    ldr x1, [x0, #33*8]
    // And the value of x0.
    ldr x0, [x0]

    br  x1
END art_quick_do_long_jump
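
// Layout of the gprs_ array consumed above, inferred from the offsets used
// (8 bytes per slot): slots 0-30 hold x0-x30, slot 31 holds the SP, slot 32
// is the unused XZR placeholder, and slot 33 holds the PC to branch to.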

    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
     * possibly null object to lock.
     *
     * Derived from arm32 code.
     */
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    cbz    w0, .Lslow_lock
    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate offset form
.Lretry_lock:
    ldr    w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
    ldxr   w1, [x4]
    mov    x3, x1
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
    cbnz   w3, .Lnot_unlocked         // already thin locked
    // unlocked case - x1: original lock word that's zero except for the read barrier bits.
    orr    x2, x1, x2                 // x2 holds thread id with count of 0 with preserved read barrier bits
    stxr   w3, w2, [x4]
    cbnz   w3, .Llock_stxr_fail       // store failed, retry
    dmb    ishld                      // acquire-style (LoadLoad|LoadStore) memory barrier
    ret
.Lnot_unlocked:  // x1: original lock word
    lsr    w3, w1, LOCK_WORD_STATE_SHIFT
    cbnz   w3, .Lslow_lock            // if either of the top two bits are set, go slow path
    eor    w2, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
    uxth   w2, w2                     // zero top 16 bits
    cbnz   w2, .Lslow_lock            // thread ids don't match -> contention, go slow path;
                                      // else it's a recursive lock of our own thin lock
    mov    x3, x1                     // copy the lock word to check count overflow.
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits.
    add    w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count in lock word placing in w2 to check overflow
    lsr    w3, w2, LOCK_WORD_READ_BARRIER_STATE_SHIFT  // if either of the upper two bits (28-29) are set, we overflowed.
    cbnz   w3, .Lslow_lock            // if we overflow the count go slow path
    add    w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count for real
    stxr   w3, w2, [x4]
    cbnz   w3, .Llock_stxr_fail       // store failed, retry
    ret
.Llock_stxr_fail:
    b      .Lretry_lock               // retry
.Lslow_lock:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case we block
    mov    x1, xSELF                  // pass Thread::Current
    bl     artLockObjectFromCode      // (Object* obj, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object
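
// A rough C-like sketch of the thin-lock fast path above (illustrative only;
// the ldxr/stxr exclusive pair stands in for the compare-and-swap, and the
// helper names here are informal):
//   lw = load_exclusive(&obj->lock_word);
//   if ((lw & ~read_barrier_bits) == 0)                      // unlocked
//     store_exclusive(&obj->lock_word, lw | self_tid);       // owner, count 0
//   else if (state(lw) == kThin && owner(lw) == self_tid)    // already ours
//     store_exclusive(&obj->lock_word, lw + COUNT_ONE);      // unless overflow
//   else
//     artLockObjectFromCode(obj, self);                      // fat/contended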

ENTRY art_quick_lock_object_no_inline
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case we block
    mov    x1, xSELF                  // pass Thread::Current
    bl     artLockObjectFromCode      // (Object* obj, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline

    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
     * x0 holds the possibly null object to unlock.
     *
     * Derived from arm32 code.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    cbz    x0, .Lslow_unlock
    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate offset form
.Lretry_unlock:
#ifndef USE_READ_BARRIER
    ldr    w1, [x4]
#else
    ldxr   w1, [x4]                   // Need to use atomic instructions for read barrier
#endif
    lsr    w2, w1, LOCK_WORD_STATE_SHIFT
    cbnz   w2, .Lslow_unlock          // if either of the top two bits are set, go slow path
    ldr    w2, [xSELF, #THREAD_ID_OFFSET]
    mov    x3, x1                     // copy lock word to check thread id equality
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
    eor    w3, w3, w2                 // lock_word.ThreadId() ^ self->ThreadId()
    uxth   w3, w3                     // zero top 16 bits
    cbnz   w3, .Lslow_unlock          // thread ids don't match -> we don't own the lock, go slow path
    mov    x3, x1                     // copy lock word to detect transition to unlocked
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
    cmp    w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
    bpl    .Lrecursive_thin_unlock
    // transition to unlocked
    mov    x3, x1
    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK  // w3: zero except for the preserved read barrier bits
    dmb    ish                        // full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
    str    w3, [x4]
#else
    stxr   w2, w3, [x4]               // Need to use atomic instructions for read barrier
    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
#endif
    ret
.Lrecursive_thin_unlock:  // w1: original lock word
    sub    w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // decrement count
#ifndef USE_READ_BARRIER
    str    w1, [x4]
#else
    stxr   w2, w1, [x4]               // Need to use atomic instructions for read barrier
    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
#endif
    ret
.Lunlock_stxr_fail:
    b      .Lretry_unlock             // retry
.Lslow_unlock:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case exception allocation triggers GC
    mov    x1, xSELF                  // pass Thread::Current
    bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object

ENTRY art_quick_unlock_object_no_inline
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case exception allocation triggers GC
    mov    x1, xSELF                  // pass Thread::Current
    bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline

    /*
     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
     * artThrowClassCastException.
     */
    .extern artThrowClassCastException
ENTRY art_quick_check_cast
    // Store arguments and link register.
    // Stack needs to be 16B aligned on calls.
    stp x0, x1, [sp, #-32]!
    .cfi_adjust_cfa_offset 32
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    str xLR, [sp, #24]
    .cfi_rel_offset x30, 24

    // Call runtime code.
    bl artIsAssignableFromCode

    // Check for exception.
    cbz x0, .Lthrow_class_cast_exception

    // Restore and return.
    ldr xLR, [sp, #24]
    .cfi_restore x30
    ldp x0, x1, [sp], #32
    .cfi_restore x0
    .cfi_restore x1
    .cfi_adjust_cfa_offset -32
    ret

    .cfi_adjust_cfa_offset 32         // Reset unwind info so following code unwinds.

.Lthrow_class_cast_exception:
    // Restore.
    ldr xLR, [sp, #24]
    .cfi_restore x30
    ldp x0, x1, [sp], #32
    .cfi_restore x0
    .cfi_restore x1
    .cfi_adjust_cfa_offset -32

    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
    mov x2, xSELF                     // pass Thread::Current
    b artThrowClassCastException      // (Class*, Class*, Thread*)
    brk 0                             // We should not return here...
END art_quick_check_cast

// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
.macro POP_REG_NE xReg, offset, xExclude
    .ifnc \xReg, \xExclude
        ldr \xReg, [sp, #\offset]     // restore xReg
        .cfi_restore \xReg
    .endif
.endm

// Restore xReg1's value from [sp, #offset] if xReg1 is not the same as xExclude.
// Restore xReg2's value from [sp, #(offset + 8)] if xReg2 is not the same as xExclude.
.macro POP_REGS_NE xReg1, xReg2, offset, xExclude
    .ifc \xReg1, \xExclude
        ldr \xReg2, [sp, #(\offset + 8)]        // restore xReg2
    .else
        .ifc \xReg2, \xExclude
            ldr \xReg1, [sp, #\offset]          // restore xReg1
        .else
            ldp \xReg1, \xReg2, [sp, #\offset]  // restore xReg1 and xReg2
        .endif
    .endif
    .cfi_restore \xReg1
    .cfi_restore \xReg2
.endm
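
// Example: in the READ_BARRIER macro below, "POP_REG_NE x0, 0, \xDest" reloads
// x0 from [sp, #0] unless x0 happens to be the destination register that now
// holds the barrier's result, in which case the reload is skipped so the
// result is not clobbered.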

    /*
     * Macro to insert read barrier, only used in art_quick_aput_obj.
     * xDest, wDest and xObj are registers, offset is a defined literal such as
     * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
     * the name mismatch between instructions. This macro uses the lower 32b of the register when
     * possible.
     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
     */
.macro READ_BARRIER xDest, wDest, xObj, offset
#ifdef USE_READ_BARRIER
    // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
    stp x0, x1, [sp, #-48]!
    .cfi_adjust_cfa_offset 48
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    stp x2, x3, [sp, #16]
    .cfi_rel_offset x2, 16
    .cfi_rel_offset x3, 24
    stp x4, xLR, [sp, #32]
    .cfi_rel_offset x4, 32
    .cfi_rel_offset x30, 40

    // mov x0, \xRef                // pass ref in x0 (no-op for now since parameter ref is unused)
    .ifnc \xObj, x1
        mov x1, \xObj               // pass xObj
    .endif
    mov w2, #\offset                // pass offset
    bl artReadBarrierSlow           // artReadBarrierSlow(ref, xObj, offset)
    // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning.
    .ifnc \wDest, w0
        mov \wDest, w0              // save return value in wDest
    .endif

    // Conditionally restore saved registers.
    POP_REG_NE x0, 0, \xDest
    POP_REG_NE x1, 8, \xDest
    POP_REG_NE x2, 16, \xDest
    POP_REG_NE x3, 24, \xDest
    POP_REG_NE x4, 32, \xDest
    ldr xLR, [sp, #40]
    .cfi_restore x30
    add sp, sp, #48
    .cfi_adjust_cfa_offset -48
#else
    ldr \wDest, [\xObj, #\offset]   // Heap reference = 32b. This also zero-extends to \xDest.
    UNPOISON_HEAP_REF \wDest
#endif  // USE_READ_BARRIER
.endm
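
// Equivalent C-like behavior of the macro (a sketch; UnpoisonHeapRef stands in
// for whatever UNPOISON_HEAP_REF expands to in this build):
//   wDest = USE_READ_BARRIER
//               ? artReadBarrierSlow(/* ref, unused */ 0, xObj, offset)
//               : UnpoisonHeapRef(*(uint32_t*)((char*)xObj + offset));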

    /*
     * Entry from managed code for array put operations of objects where the value being stored
     * needs to be checked for compatibility.
     * x0 = array, x1 = index, x2 = value
     *
     * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We
     * assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by
     * using index-zero-extension in load/stores.
     *
     * Temporaries: x3, x4
     * TODO: x4 OK? ip seems wrong here.
     */
ENTRY art_quick_aput_obj_with_null_and_bound_check
    tst x0, x0
    bne art_quick_aput_obj_with_bound_check
    b art_quick_throw_null_pointer_exception
END art_quick_aput_obj_with_null_and_bound_check

ENTRY art_quick_aput_obj_with_bound_check
    ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]
    cmp w3, w1
    bhi art_quick_aput_obj
    mov x0, x1
    mov x1, x3
    b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check

#ifdef USE_READ_BARRIER
    .extern artReadBarrierSlow
#endif
ENTRY art_quick_aput_obj
    cbz x2, .Ldo_aput_null
    READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET     // Heap reference = 32b
                                                         // This also zero-extends to x3
    READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET     // Heap reference = 32b
                                                         // This also zero-extends to x4
    READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b
                                                         // This also zero-extends to x3
    cmp w3, w4  // value's type == array's component type - trivial assignability
    bne .Lcheck_assignability
.Ldo_aput:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                                         // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]           // Load the card table base.
    lsr x0, x0, #7                                       // Card index = array address >> card shift.
    strb w3, [x3, x0]                                    // Mark the card dirty (the byte stored is
                                                         // the card table base's low byte).
    ret
.Ldo_aput_null:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                                         // "Compress" = do nothing
    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
    ret
.Lcheck_assignability:
    // Store arguments and link register.
    stp x0, x1, [sp, #-32]!
    .cfi_adjust_cfa_offset 32
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    stp x2, xLR, [sp, #16]
    .cfi_rel_offset x2, 16
    .cfi_rel_offset x30, 24

    // Call runtime code.
    mov x0, x3              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    mov x1, x4              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    bl artIsAssignableFromCode

    // Check for exception.
    cbz x0, .Lthrow_array_store_exception

    // Restore.
    ldp x2, x30, [sp, #16]
    .cfi_restore x2
    .cfi_restore x30
    ldp x0, x1, [sp], #32
    .cfi_restore x0
    .cfi_restore x1
    .cfi_adjust_cfa_offset -32

    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                                          // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]                              // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]            // Load the card table base.
    lsr x0, x0, #7                                        // Card index = array address >> card shift.
    strb w3, [x3, x0]                                     // Mark the card dirty.
    ret
    .cfi_adjust_cfa_offset 32  // 4 restores after cbz for unwinding.
.Lthrow_array_store_exception:
    ldp x2, x30, [sp, #16]
    .cfi_restore x2
    .cfi_restore x30
    ldp x0, x1, [sp], #32
    .cfi_restore x0
    .cfi_restore x1
    .cfi_adjust_cfa_offset -32

    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov x1, x2                    // Pass value.
    mov x2, xSELF                 // Pass Thread::Current.
    b artThrowArrayStoreException // (Object*, Object*, Thread*).
    brk 0                         // Unreached.
END art_quick_aput_obj

// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x1, xSELF                  // pass Thread::Current
    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x2, xSELF                  // pass Thread::Current
    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x3, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
    mov    x4, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
    DELIVER_PENDING_EXCEPTION
END \name
.endm

// Macros that exploit code similarities for downcalls that take a referrer.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
    mov    x2, xSELF                  // pass Thread::Current
    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*, SP)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    ldr    x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
    mov    x3, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    ldr    x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
    mov    x4, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    cbz w0, 1f                 // result zero branch over
    ret                        // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

    /*
     * Entry from managed code when static storage is uninitialized. This stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1561
1562// This is separated out as the argument order is different.
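// (The setter macros pass the referrer after the managed arguments; here the
// wide new value already arrives from managed code in x2, so the referrer is
// loaded into x1 instead, matching what is assumed to be
// artSet64StaticFromCode's (field_idx, referrer, new_value, Thread*) order.)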
    .extern artSet64StaticFromCode
ENTRY art_quick_set64_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
                                      // x2 contains the parameter
    mov    x3, xSELF                  // pass Thread::Current
    bl     artSet64StaticFromCode
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static

    /*
     * Entry from managed code to resolve a string; this stub will allocate a String and deliver
     * an exception on error. On success the String is returned. w0 holds the string index. The
     * fast-path check for a hit in the strings cache has already been performed.
     */
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR

// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
ENTRY art_quick_alloc_object_rosalloc
    // Fast path rosalloc allocation.
    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
    // x2-x7: free.
    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
                                                              // Load the class (x2)
    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
    cbz    x2, .Lart_quick_alloc_object_rosalloc_slow_path    // Check null class
                                                              // Check class status.
    ldr    w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
    cmp    x3, #MIRROR_CLASS_STATUS_INITIALIZED
    bne    .Lart_quick_alloc_object_rosalloc_slow_path
                                                              // Add a fake dependency from the
                                                              // following access flag and size
                                                              // loads to the status load.
                                                              // This is to prevent those loads
                                                              // from being reordered above the
                                                              // status load and reading wrong
                                                              // values (an alternative is to use
                                                              // a load-acquire for the status).
    eor    x3, x3, x3
    add    x2, x2, x3
                                                              // Check whether the access flags
                                                              // have kAccClassIsFinalizable.
    ldr    w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
    tst    x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
    bne    .Lart_quick_alloc_object_rosalloc_slow_path
    ldr    x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]  // Check if the thread-local
                                                              // allocation stack has room.
                                                              // ldp won't work due to large offset.
    ldr    x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
    cmp    x3, x4
    bhs    .Lart_quick_alloc_object_rosalloc_slow_path
    ldr    w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET]         // Load the object size (x3)
    cmp    x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE        // Check if the size is for a
                                                              // thread-local allocation.
    bhs    .Lart_quick_alloc_object_rosalloc_slow_path
                                                              // Compute the rosalloc bracket index
                                                              // from the size: round the size up
                                                              // to the bracket quantum, divide by
                                                              // the quantum, and subtract 1. The
                                                              // code below is a shorter but
                                                              // equivalent version.
    sub    x3, x3, #1
    lsr    x3, x3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
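                                                              // For example, assuming a 16-byte
                                                              // quantum (shift of 4): sizes 1..16
                                                              // map to bracket 0, 17..32 to
                                                              // bracket 1, since (size - 1) >> 4
                                                              // equals ((size + 15) / 16) - 1.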
                                                              // Load the rosalloc run (x4)
    add    x4, xSELF, x3, lsl #POINTER_SIZE_SHIFT
    ldr    x4, [x4, #THREAD_ROSALLOC_RUNS_OFFSET]
                                                              // Load the free list head (x3). This
                                                              // will be the return val.
    ldr    x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
    cbz    x3, .Lart_quick_alloc_object_rosalloc_slow_path
    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
    ldr    x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET]               // Load the next pointer of the head
                                                              // and update the list head with the
                                                              // next pointer.
    str    x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
                                                              // Store the class pointer in the
                                                              // header. This also overwrites the
                                                              // next pointer. The offsets are
                                                              // asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
    POISON_HEAP_REF w2
    str    w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
                                                              // Fence. This is "ish" not "ishst" so
                                                              // that it also ensures ordering of
                                                              // the class status load with respect
                                                              // to later accesses to the class
                                                              // object. Alternatively we could use
                                                              // "ishst" if we use load-acquire for
                                                              // the class status load.
                                                              // Needs to be done before pushing
                                                              // onto the allocation stack since
                                                              // Heap::VisitObjects relies on
                                                              // seeing the class pointer.
                                                              // b/28790624
    dmb    ish
                                                              // Push the new object onto the
                                                              // thread-local allocation stack and
                                                              // increment the thread-local
                                                              // allocation stack top.
    ldr    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
    str    w3, [x1], #COMPRESSED_REFERENCE_SIZE               // (Increment x1 as a side effect.)
    str    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
                                                              // Decrement the size of the free list
    ldr    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
    sub    x1, x1, #1
                                                              // TODO: consider combining this store
                                                              // and the list head store above using
                                                              // stp.
    str    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]

    mov    x0, x3                                             // Set the return value and return.
    ret
.Lart_quick_alloc_object_rosalloc_slow_path:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME      // save callee saves in case of GC
    mov    x2, xSELF                       // pass Thread::Current
    bl     artAllocObjectFromCodeRosAlloc  // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
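
// In rough C terms, the fast path above pops one slot off a per-thread,
// per-size-bracket free list (names are illustrative, not the runtime's):
//   idx  = (size - 1) >> kQuantumShift;
//   run  = self->rosalloc_runs[idx];
//   slot = run->free_list.head;
//   if (slot == nullptr) goto slow_path;
//   run->free_list.head = slot->next;
//   run->free_list.size--;
//   slot->klass = klass;               // overwrites slot->next; dmb ish follows
//   return slot;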

// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
//
// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
// x3-x7: free.
// Needs to preserve x0 and x1 for the slow path.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
    cbz    x2, \slowPathLabel                                 // Check null class
                                                              // Check class status.
    ldr    w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
    cmp    x3, #MIRROR_CLASS_STATUS_INITIALIZED
    bne    \slowPathLabel
                                                              // Add a fake dependency from the
                                                              // following access flag and size
                                                              // loads to the status load.
                                                              // This is to prevent those loads
                                                              // from being reordered above the
                                                              // status load and reading wrong
                                                              // values (an alternative is to use
                                                              // a load-acquire for the status).
    eor    x3, x3, x3
    add    x2, x2, x3
                                                              // Check whether the access flags
                                                              // have kAccClassIsFinalizable.
    ldr    w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
    tbnz   x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT, \slowPathLabel
                                                              // Load thread_local_pos (x4) and
                                                              // thread_local_end (x5).
    ldr    x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
    ldr    x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
    sub    x6, x5, x4                                         // Compute the remaining buf size.
    ldr    w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET]         // Load the object size (x7).
    cmp    x7, x6                                             // Check if it fits. OK to do this
                                                              // before rounding up the object size
                                                              // assuming the buf size alignment.
    bhi    \slowPathLabel
    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
                                                              // Round up the object size by the
                                                              // object alignment: (size + 7) & ~7.
    add    x7, x7, #OBJECT_ALIGNMENT_MASK
    and    x7, x7, #OBJECT_ALIGNMENT_MASK_TOGGLED
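                                                              // e.g. a 13-byte object rounds up
                                                              // to (13 + 7) & ~7 = 16 bytes.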
                                                              // Move old thread_local_pos to x0
                                                              // for the return value.
    mov    x0, x4
    add    x5, x0, x7
    str    x5, [xSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
    ldr    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
    add    x5, x5, #1
    str    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
    POISON_HEAP_REF w2
    str    w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
                                                              // Fence. This is "ish" not "ishst" so
                                                              // that the code after this allocation
                                                              // site will see the right values in
                                                              // the fields of the class.
                                                              // Alternatively we could use "ishst"
                                                              // if we use load-acquire for the
                                                              // class status load.
    dmb    ish
    ret
.endm
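
// In rough C terms, the macro above is a bump-pointer allocation (names are
// illustrative, not the runtime's):
//   size_t size = klass->object_size_;
//   if (size > self->tlab_end - self->tlab_pos) goto slow_path;
//   size = (size + 7) & ~7;            // align to the object alignment
//   Object* obj = self->tlab_pos;
//   self->tlab_pos += size;
//   self->tlab_objects++;
//   obj->klass_ = klass;               // followed by a dmb ish fence
//   return obj;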

// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
ENTRY art_quick_alloc_object_tlab
    // Fast path tlab allocation.
    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
    // x2-x7: free.
#if defined(USE_READ_BARRIER)
    mvn    x0, xzr                                            // Read barrier not supported here.
    ret                                                       // Return -1.
#endif
    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
                                                              // Load the class (x2)
    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
    ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    // Save callee saves in case of GC.
    mov    x2, xSELF                     // Pass Thread::Current.
    bl     artAllocObjectFromCodeTLAB    // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab

// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
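//
// The region TLAB allocator is used with the concurrent-copying collector, so
// the class loaded from the dex cache may still point at a from-space copy
// while marking is in progress; it is therefore passed through
// artReadBarrierMark below before being published in a new object's header.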
ENTRY art_quick_alloc_object_region_tlab
    // Fast path region tlab allocation.
    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
    // x2-x7: free.
#if !defined(USE_READ_BARRIER)
    mvn    x0, xzr                                            // Read barrier must be enabled here.
    ret                                                       // Return -1.
#endif
    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
                                                              // Load the class (x2)
    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
                                                              // Read barrier for class load.
    ldr    w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
    cbnz   x3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
    ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
                                                              // The read barrier slow path. Mark
                                                              // the class.
    stp    x0, x1, [sp, #-32]!                                // Save x0, x1 and lr in a 32-byte
    str    xLR, [sp, #16]                                     // frame; keeps sp 16-byte aligned.
    mov    x0, x2                                             // Pass the class as the first param.
    bl     artReadBarrierMark
    mov    x2, x0                                             // Get the (marked) class back.
    ldp    x0, x1, [sp, #0]                                   // Restore registers.
    ldr    xLR, [sp, #16]
    add    sp, sp, #32
    b      .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_region_tlab_slow_path:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          // Save callee saves in case of GC.
    mov    x2, xSELF                           // Pass Thread::Current.
    bl     artAllocObjectFromCodeRegionTLAB    // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_region_tlab

    /*
     * Called by managed code when the thread has been asked to suspend.
     */
    .extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
    ldrh   w0, [xSELF, #THREAD_FLAGS_OFFSET]  // get xSELF->state_and_flags.as_struct.flags
    cbnz   w0, .Lneed_suspend                 // branch if flags != 0
    ret                                       // return if flags == 0
.Lneed_suspend:
    mov    x0, xSELF
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME         // save callee saves for stack crawl
    bl     artTestSuspendFromCode             // (Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend

ENTRY art_quick_implicit_suspend
    mov    x0, xSELF
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME         // save callee saves for stack crawl
    bl     artTestSuspendFromCode             // (Thread*)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend

    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * x0 holds the proxy method and x1 holds the receiver. The frame size of the invoked proxy
     * method agrees with a ref and args callee save frame.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
    mov     x2, xSELF                   // pass Thread::Current
    mov     x3, sp                      // pass SP
    bl      artQuickProxyInvokeHandler  // (Method* proxy method, receiver, Thread*, SP)
    ldr     x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz    x2, .Lexception_in_proxy    // branch out if an exception is pending
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
    fmov    d0, x0                      // Store result in d0 in case it was float or double
    ret                                 // return on success
.Lexception_in_proxy:
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler

    /*
     * Called to resolve an imt conflict.
     * x0 is the conflict ArtMethod.
     * xIP1 is a hidden argument that holds the target interface method's dex method index.
     *
     * Note that this stub writes to xIP0, xIP1, and x0.
     */
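    /*
     * The ImtConflictTable referenced below is assumed to be a sequence of
     * { interface ArtMethod*, implementation ArtMethod* } pointer pairs
     * terminated by a null interface-method entry, which is why each
     * iteration advances xIP1 by 2 * __SIZEOF_POINTER__.
     */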
    .extern artInvokeInterfaceTrampoline
ENTRY art_quick_imt_conflict_trampoline
    ldr xIP0, [sp, #0]  // Load referrer
    ldr xIP0, [xIP0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_64]   // Load dex cache methods array
    ldr xIP0, [xIP0, xIP1, lsl #POINTER_SIZE_SHIFT]  // Load interface method
    ldr xIP1, [x0, #ART_METHOD_JNI_OFFSET_64]  // Load ImtConflictTable
    ldr x0, [xIP1]  // Load first entry in ImtConflictTable.
.Limt_table_iterate:
    cmp x0, xIP0
    // Branch if found. Benchmarks have shown doing a branch here is better.
    beq .Limt_table_found
    // If the entry is null, the interface method is not in the ImtConflictTable.
    cbz x0, .Lconflict_trampoline
    // Iterate over the entries of the ImtConflictTable.
    ldr x0, [xIP1, #(2 * __SIZEOF_POINTER__)]!
    b .Limt_table_iterate
.Limt_table_found:
    // We successfully hit an entry in the table. Load the target method
    // and jump to it.
    ldr x0, [xIP1, #__SIZEOF_POINTER__]
    ldr xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    br xIP0
.Lconflict_trampoline:
    // Call the runtime stub to populate the ImtConflictTable and jump to the
    // resolved method.
    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline

ENTRY art_quick_resolution_trampoline
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    mov x2, xSELF
    mov x3, sp
    bl artQuickResolutionTrampoline  // (called, receiver, Thread*, SP)
    cbz x0, 1f
    mov xIP0, x0            // Remember returned code pointer in xIP0.
    ldr x0, [sp, #0]        // artQuickResolutionTrampoline puts called method in *SP.
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    br xIP0
1:
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline

/*
 * Generic JNI frame layout:
 *
 * #-------------------#
 * |                   |
 * | caller method...  |
 * #-------------------#    <--- SP on entry
 * | Return X30/LR     |
 * | X29/FP            |    callee save
 * | X28               |    callee save
 * | X27               |    callee save
 * | X26               |    callee save
 * | X25               |    callee save
 * | X24               |    callee save
 * | X23               |    callee save
 * | X22               |    callee save
 * | X21               |    callee save
 * | X20               |    callee save
 * | X19               |    callee save
 * | X7                |    arg7
 * | X6                |    arg6
 * | X5                |    arg5
 * | X4                |    arg4
 * | X3                |    arg3
 * | X2                |    arg2
 * | X1                |    arg1
 * | D7                |    float arg 8
 * | D6                |    float arg 7
 * | D5                |    float arg 6
 * | D4                |    float arg 5
 * | D3                |    float arg 4
 * | D2                |    float arg 3
 * | D1                |    float arg 2
 * | D0                |    float arg 1
 * | Method*           | <- X0
 * #-------------------#
 * | local ref cookie  | // 4B
 * | handle scope size | // 4B
 * #-------------------#
 * | JNI Call Stack    |
 * #-------------------#    <--- SP on native call
 * |                   |
 * | Stack for Regs    |    The trampoline assembly will pop these values
 * |                   |    into registers for native call
 * #-------------------#
 * | Native code ptr   |
 * #-------------------#
 * | Free scratch      |
 * #-------------------#
 * | Ptr to (1)        |    <--- SP
 * #-------------------#
 */
    /*
     * Called to do a generic JNI down-call.
     */
ENTRY art_quick_generic_jni_trampoline
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0

    // Save SP, so we can have static CFI info.
    mov x28, sp
    .cfi_def_cfa_register x28

    // This looks the same, but is different: this will be updated to point to the bottom
    // of the frame when the handle scope is inserted.
    mov xFP, sp

    // Reserve 5120 bytes of scratch space below sp for the runtime call.
    mov xIP0, #5120
    sub sp, sp, xIP0

    // prepare for artQuickGenericJniTrampoline call
    // (Thread*,  SP)
    //    x0      x1   <= C calling convention
    //   xSELF    xFP  <= where they are

    mov x0, xSELF   // Thread*
    mov x1, xFP
    bl artQuickGenericJniTrampoline  // (Thread*, sp)

    // The C call will have registered the complete save-frame on success.
    // The result of the call is:
    // x0: pointer to native code, 0 on error.
    // x1: pointer to the bottom of the used area of the alloca, can restore stack till there.

    // Check for error (x0 == 0).
    cbz x0, .Lexception_in_native

    // Release part of the alloca.
    mov sp, x1

    // Save the code pointer
    mov xIP0, x0

    // Load parameters from frame into registers.
    // TODO Check with artQuickGenericJniTrampoline.
    //      Also, check again AAPCS64 - the stack arguments are interleaved.
    ldp x0, x1, [sp]
    ldp x2, x3, [sp, #16]
    ldp x4, x5, [sp, #32]
    ldp x6, x7, [sp, #48]

    ldp d0, d1, [sp, #64]
    ldp d2, d3, [sp, #80]
    ldp d4, d5, [sp, #96]
    ldp d6, d7, [sp, #112]

    add sp, sp, #128

    blr xIP0        // native call.

    // result sign extension is handled in C code
    // prepare for artQuickGenericJniEndTrampoline call
    // (Thread*, result, result_f)
    //    x0       x1       x2        <= C calling convention
    mov x1, x0      // Result (from saved).
    mov x0, xSELF   // Thread register.
    fmov x2, d0     // d0 will contain floating point result, but needs to go into x2

    bl artQuickGenericJniEndTrampoline

    // Pending exceptions possible.
    ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz x2, .Lexception_in_native

    // Tear down the alloca.
    mov sp, x28
    .cfi_def_cfa_register sp

    // Tear down the callee-save frame.
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME

    // Store into d0 as well, for the case of a floating-point return.
    fmov d0, x0
    ret

.Lexception_in_native:
    // Go through x1, as sp cannot be the destination register of the load.
    ldr x1, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
    mov sp, x1
    .cfi_def_cfa_register sp
    // This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
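
// In rough C terms, the down-call above is (illustrative signatures; the
// first trampoline actually returns its two results in x0 and x1):
//   void* code = artQuickGenericJniTrampoline(self, frame);  // null on error
//   uint64_t raw = ((generic_jni_fn) code)(/* args reloaded from the stack */);
//   uint64_t res = artQuickGenericJniEndTrampoline(self, raw, fpr_raw);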

/*
 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those
 * of a quick call:
 * x0 = method being called/to bridge to.
 * x1..x7, d0..d7 = arguments to that method.
 */
ENTRY art_quick_to_interpreter_bridge
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME   // Set up frame and save arguments.

    //  x0 will contain mirror::ArtMethod* method.
    mov x1, xSELF                          // pass Thread::Current
    mov x2, sp

    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
    //                                      mirror::ArtMethod** sp)
    bl   artQuickToInterpreterBridge

    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // TODO: no need to restore arguments in this case.

    fmov d0, x0

    RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge


//
// Instrumentation-related stubs
//
    .extern artInstrumentationMethodEntryFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME

    mov   x20, x0             // Preserve method reference in a callee-save.

    mov   x2, xSELF
    mov   x3, xLR
    bl    artInstrumentationMethodEntryFromCode  // (Method*, Object*, Thread*, LR)

    mov   xIP0, x0            // x0 = result of call.
    mov   x0, x20             // Reload method reference.

    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // Note: will restore xSELF
    adr   xLR, art_quick_instrumentation_exit
    br    xIP0                // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry

    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_exit
    mov   xLR, #0             // Clobber LR for later checks.

    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME

    // We need to save x0 and d0. We could use a callee-save from SETUP_REFS_ONLY, but then
    // we would need to fully restore it. As there are a lot of callee-save registers, it seems
    // easier to have an extra small stack area.

    str x0, [sp, #-16]!       // Save integer result.
    .cfi_adjust_cfa_offset 16
    str d0,  [sp, #8]         // Save floating-point result.

    add   x1, sp, #16         // Pass SP.
    mov   x2, x0              // Pass integer result.
    fmov  x3, d0              // Pass floating-point result.
    mov   x0, xSELF           // Pass Thread.
    bl   artInstrumentationMethodExitFromCode    // (Thread*, SP, gpr_res, fpr_res)

    mov   xIP0, x0            // Return address from instrumentation call.
    mov   xLR, x1             // x1 holds the link register if we are to bounce to deoptimize.

    ldr   d0, [sp, #8]        // Restore floating-point result.
    ldr   x0, [sp], 16        // Restore integer result, and drop stack area.
    .cfi_adjust_cfa_offset -16

    POP_REFS_ONLY_CALLEE_SAVE_FRAME

    br    xIP0                // Tail-call out.
END art_quick_instrumentation_exit

    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov    x0, xSELF          // Pass thread.
    bl     artDeoptimize      // artDeoptimize(Thread*)
    brk 0
END art_quick_deoptimize

    /*
     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov    x0, xSELF                      // Pass thread.
    bl     artDeoptimizeFromCompiledCode  // artDeoptimizeFromCompiledCode(Thread*)
    brk 0
END art_quick_deoptimize_from_compiled_code


    /*
     * String's indexOf.
     *
     * TODO: Not very optimized.
     * On entry:
     *    x0:   string object (known non-null)
     *    w1:   char to match (known <= 0xFFFF)
     *    w2:   Starting offset in string data
     */
ENTRY art_quick_indexof
    ldr   w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
    add   x0, x0, #MIRROR_STRING_VALUE_OFFSET

    /* Clamp start to [0..count] */
    cmp   w2, #0
    csel  w2, wzr, w2, lt
    cmp   w2, w3
    csel  w2, w3, w2, gt

    /* Save a copy to compute result */
    mov   x5, x0

    /* Build pointer to start of data to compare and pre-bias */
    add   x0, x0, x2, lsl #1
    sub   x0, x0, #2

    /* Compute iteration count */
    sub   w2, w3, w2

    /*
     * At this point we have:
     *  x0: start of the data to test
     *  w1: char to compare
     *  w2: iteration count
     *  x5: original start of string data
     */

    subs  w2, w2, #4
    b.lt  .Lindexof_remainder

.Lindexof_loop4:
    ldrh  w6, [x0, #2]!
    ldrh  w7, [x0, #2]!
    ldrh  wIP0, [x0, #2]!
    ldrh  wIP1, [x0, #2]!
    cmp   w6, w1
    b.eq  .Lmatch_0
    cmp   w7, w1
    b.eq  .Lmatch_1
    cmp   wIP0, w1
    b.eq  .Lmatch_2
    cmp   wIP1, w1
    b.eq  .Lmatch_3
    subs  w2, w2, #4
    b.ge  .Lindexof_loop4

.Lindexof_remainder:
    adds  w2, w2, #4
    b.eq  .Lindexof_nomatch

.Lindexof_loop1:
    ldrh  w6, [x0, #2]!
    cmp   w6, w1
    b.eq  .Lmatch_3
    subs  w2, w2, #1
    b.ne  .Lindexof_loop1

.Lindexof_nomatch:
    mov   x0, #-1
    ret

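    /*
     * Each `ldrh ..., [x0, #2]!` above pre-increments x0, so on a match x0
     * points at the last character loaded. The labels below subtract 6, 4,
     * 2 or 0 bytes to recover the matching character's address, then halve
     * its byte distance from the original data start (x5) to get the index.
     */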
.Lmatch_0:
    sub   x0, x0, #6
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
.Lmatch_1:
    sub   x0, x0, #4
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
.Lmatch_2:
    sub   x0, x0, #2
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
.Lmatch_3:
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
END art_quick_indexof
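
// In rough C terms, art_quick_indexof implements (illustrative field names):
//   int32_t indexof(String* s, uint16_t ch, int32_t start) {
//     uint16_t* data = s->value_;          // at MIRROR_STRING_VALUE_OFFSET
//     int32_t count = s->count_;           // at MIRROR_STRING_COUNT_OFFSET
//     if (start < 0) start = 0;
//     if (start > count) start = count;
//     for (int32_t i = start; i < count; ++i)
//       if (data[i] == ch) return i;
//     return -1;
//   }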

    /*
     * Create a function `name` calling the ReadBarrier::Mark routine,
     * getting its argument and returning its result through W register
     * `wreg` (corresponding to X register `xreg`), saving and restoring
     * all caller-save registers.
     *
     * If `wreg` is different from `w0`, the generated function follows a
     * non-standard runtime calling convention:
     * - register `wreg` is used to pass the (sole) argument of this
     *   function (instead of W0);
     * - register `wreg` is used to return the result of this function
     *   (instead of W0);
     * - W0 is treated like a normal (non-argument) caller-save register;
     * - everything else is the same as in the standard runtime calling
     *   convention (e.g. standard callee-save registers are preserved).
     */
.macro READ_BARRIER_MARK_REG name, wreg, xreg
ENTRY \name
    /*
     * Allocate 46 stack slots * 8 = 368 bytes:
     * - 20 slots for core registers X0-X19
     * - 24 slots for floating-point registers D0-D7 and D16-D31
     * -  1 slot for return address register XLR
     * -  1 padding slot for 16-byte stack alignment
     */
    // Save all potentially live caller-save core registers.
    stp   x0, x1,   [sp, #-368]!
    .cfi_adjust_cfa_offset 368
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    stp   x2, x3,   [sp, #16]
    .cfi_rel_offset x2, 16
    .cfi_rel_offset x3, 24
    stp   x4, x5,   [sp, #32]
    .cfi_rel_offset x4, 32
    .cfi_rel_offset x5, 40
    stp   x6, x7,   [sp, #48]
    .cfi_rel_offset x6, 48
    .cfi_rel_offset x7, 56
    stp   x8, x9,   [sp, #64]
    .cfi_rel_offset x8, 64
    .cfi_rel_offset x9, 72
    stp   x10, x11, [sp, #80]
    .cfi_rel_offset x10, 80
    .cfi_rel_offset x11, 88
    stp   x12, x13, [sp, #96]
    .cfi_rel_offset x12, 96
    .cfi_rel_offset x13, 104
    stp   x14, x15, [sp, #112]
    .cfi_rel_offset x14, 112
    .cfi_rel_offset x15, 120
    stp   x16, x17, [sp, #128]
    .cfi_rel_offset x16, 128
    .cfi_rel_offset x17, 136
    stp   x18, x19, [sp, #144]
    .cfi_rel_offset x18, 144
    .cfi_rel_offset x19, 152
    // Save all potentially live caller-save floating-point registers.
    stp   d0, d1,   [sp, #160]
    stp   d2, d3,   [sp, #176]
    stp   d4, d5,   [sp, #192]
    stp   d6, d7,   [sp, #208]
    stp   d16, d17, [sp, #224]
    stp   d18, d19, [sp, #240]
    stp   d20, d21, [sp, #256]
    stp   d22, d23, [sp, #272]
    stp   d24, d25, [sp, #288]
    stp   d26, d27, [sp, #304]
    stp   d28, d29, [sp, #320]
    stp   d30, d31, [sp, #336]
    // Save return address.
    str   xLR,      [sp, #352]
    .cfi_rel_offset x30, 352
    // (sp + #360 is a padding slot)

    .ifnc \wreg, w0
      mov   w0, \wreg                   // Pass arg1 - obj from `wreg`
    .endif
    bl    artReadBarrierMark            // artReadBarrierMark(obj)
    .ifnc \wreg, w0
      mov   \wreg, w0                   // Return result into `wreg`
    .endif

    // Restore core regs, except `xreg`, as `wreg` is used to return the
    // result of this function (simply remove it from the stack instead).
    POP_REGS_NE x0, x1,   0,   \xreg
    POP_REGS_NE x2, x3,   16,  \xreg
    POP_REGS_NE x4, x5,   32,  \xreg
    POP_REGS_NE x6, x7,   48,  \xreg
    POP_REGS_NE x8, x9,   64,  \xreg
    POP_REGS_NE x10, x11, 80,  \xreg
    POP_REGS_NE x12, x13, 96,  \xreg
    POP_REGS_NE x14, x15, 112, \xreg
    POP_REGS_NE x16, x17, 128, \xreg
    POP_REGS_NE x18, x19, 144, \xreg
    // Restore floating-point registers.
    ldp   d0, d1,   [sp, #160]
    ldp   d2, d3,   [sp, #176]
    ldp   d4, d5,   [sp, #192]
    ldp   d6, d7,   [sp, #208]
    ldp   d16, d17, [sp, #224]
    ldp   d18, d19, [sp, #240]
    ldp   d20, d21, [sp, #256]
    ldp   d22, d23, [sp, #272]
    ldp   d24, d25, [sp, #288]
    ldp   d26, d27, [sp, #304]
    ldp   d28, d29, [sp, #320]
    ldp   d30, d31, [sp, #336]
    // Restore return address and remove padding.
    ldr   xLR,      [sp, #352]
    .cfi_restore x30
    add sp, sp, #368
    .cfi_adjust_cfa_offset -368
    ret
END \name
.endm
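
// One entry point is instantiated per register so the compiler can call the
// variant matching the register that holds the reference, avoiding moves to
// and from w0 around the read-barrier call.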

READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg00, w0,  x0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, w1,  x1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, w2,  x2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, w3,  x3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, w4,  x4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, w5,  x5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, w6,  x6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, w7,  x7
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, w8,  x8
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, w9,  x9
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, w10, x10
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, w11, x11
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, w12, x12
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, w13, x13
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, w14, x14
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, w15, x15
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, w16, x16
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, w17, x17
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, w19, x19
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, w20, x20
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, w21, x21
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, w22, x22
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg23, w23, x23
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg24, w24, x24
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg25, w25, x25
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg26, w26, x26
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, w27, x27
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, w28, x28
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
