1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "asm_support_arm64.S"
18
19#include "arch/quick_alloc_entrypoints.S"
20
21
22    /*
23     * Macro that sets up the callee save frame to conform with
24     * Runtime::CreateCalleeSaveMethod(kSaveAll)
25     */
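    /*
     * Frame layout sketch for the 176-byte frame (derived from the stores below, offsets in
     * bytes from the new sp; not authoritative):
     *   [#0]   ArtMethod* (Runtime::callee_save_methods_[kSaveAll])
     *   [#8]   alignment filler
     *   [#16..#79]  d8 - d15
     *   [#80..#159] x19 - x28
     *   [#160] x29 (FP), [#168] x30 (LR)
     */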
26.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
27    adrp xIP0, :got:_ZN3art7Runtime9instance_E
28    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
29
30    // Our registers aren't intermixed - just spill in order.
31    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_ (an art::Runtime*).
32
33    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kSaveAll].
34    // Loads appropriate callee-save-method.
35    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
36
37    sub sp, sp, #176
38    .cfi_adjust_cfa_offset 176
39
40    // Ugly compile-time check, but we only have the preprocessor.
41#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
42#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
43#endif
44
45    // Stack alignment filler [sp, #8].
46    // FP callee-saves.
47    stp d8, d9,   [sp, #16]
48    stp d10, d11, [sp, #32]
49    stp d12, d13, [sp, #48]
50    stp d14, d15, [sp, #64]
51
52    // GP callee-saves
53    stp x19, x20, [sp, #80]
54    .cfi_rel_offset x19, 80
55    .cfi_rel_offset x20, 88
56
57    stp x21, x22, [sp, #96]
58    .cfi_rel_offset x21, 96
59    .cfi_rel_offset x22, 104
60
61    stp x23, x24, [sp, #112]
62    .cfi_rel_offset x23, 112
63    .cfi_rel_offset x24, 120
64
65    stp x25, x26, [sp, #128]
66    .cfi_rel_offset x25, 128
67    .cfi_rel_offset x26, 136
68
69    stp x27, x28, [sp, #144]
70    .cfi_rel_offset x27, 144
71    .cfi_rel_offset x28, 152
72
73    stp x29, xLR, [sp, #160]
74    .cfi_rel_offset x29, 160
75    .cfi_rel_offset x30, 168
76
77    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAll].
78    str xIP0, [sp]
79    // Place sp in Thread::Current()->top_quick_frame.
80    mov xIP0, sp
81    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
82.endm
83
84    /*
85     * Macro that sets up the callee save frame to conform with
86     * Runtime::CreateCalleeSaveMethod(kRefsOnly).
87     */
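    /*
     * Frame layout sketch for the 96-byte frame (derived from the stores below):
     *   [#0] ArtMethod*, [#8] x20, [#16..#79] x21 - x28, [#80] x29 (FP), [#88] x30 (LR)
     */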
88.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
89    adrp xIP0, :got:_ZN3art7Runtime9instance_E
90    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
91
92    // Our registers aren't intermixed - just spill in order.
93    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_ (an art::Runtime*).
94
95    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsOnly].
96    // Loads appropriate callee-save-method.
97    ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
98
99    sub sp, sp, #96
100    .cfi_adjust_cfa_offset 96
101
102    // Ugly compile-time check, but we only have the preprocessor.
103#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
104#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
105#endif
106
107    // GP callee-saves.
108    // x20 paired with ArtMethod* - see below.
109    stp x21, x22, [sp, #16]
110    .cfi_rel_offset x21, 16
111    .cfi_rel_offset x22, 24
112
113    stp x23, x24, [sp, #32]
114    .cfi_rel_offset x23, 32
115    .cfi_rel_offset x24, 40
116
117    stp x25, x26, [sp, #48]
118    .cfi_rel_offset x25, 48
119    .cfi_rel_offset x26, 56
120
121    stp x27, x28, [sp, #64]
122    .cfi_rel_offset x27, 64
123    .cfi_rel_offset x28, 72
124
125    stp x29, xLR, [sp, #80]
126    .cfi_rel_offset x29, 80
127    .cfi_rel_offset x30, 88
128
129    // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly].
130    stp xIP0, x20, [sp]
131    .cfi_rel_offset x20, 8
132
133    // Place sp in Thread::Current()->top_quick_frame.
134    mov xIP0, sp
135    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
136.endm
137
138// TODO: Probably no need to restore registers preserved by aapcs64.
139.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
140    // Callee-saves.
141    ldr x20, [sp, #8]
142    .cfi_restore x20
143
144    ldp x21, x22, [sp, #16]
145    .cfi_restore x21
146    .cfi_restore x22
147
148    ldp x23, x24, [sp, #32]
149    .cfi_restore x23
150    .cfi_restore x24
151
152    ldp x25, x26, [sp, #48]
153    .cfi_restore x25
154    .cfi_restore x26
155
156    ldp x27, x28, [sp, #64]
157    .cfi_restore x27
158    .cfi_restore x28
159
160    ldp x29, xLR, [sp, #80]
161    .cfi_restore x29
162    .cfi_restore x30
163
164    add sp, sp, #96
165    .cfi_adjust_cfa_offset -96
166.endm
167
168.macro POP_REFS_ONLY_CALLEE_SAVE_FRAME
169    add sp, sp, #96
170    .cfi_adjust_cfa_offset -96
171.endm
172
173.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
174    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
175    ret
176.endm
177
178
179.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
180    sub sp, sp, #224
181    .cfi_adjust_cfa_offset 224
182
183    // Ugly compile-time check, but we only have the preprocessor.
184#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
185#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
186#endif
187
188    // Stack alignment filler [sp, #8].
189    // FP args.
190    stp d0, d1, [sp, #16]
191    stp d2, d3, [sp, #32]
192    stp d4, d5, [sp, #48]
193    stp d6, d7, [sp, #64]
194
195    // Core args.
196    stp x1, x2, [sp, #80]
197    .cfi_rel_offset x1, 80
198    .cfi_rel_offset x2, 88
199
200    stp x3, x4, [sp, #96]
201    .cfi_rel_offset x3, 96
202    .cfi_rel_offset x4, 104
203
204    stp x5, x6, [sp, #112]
205    .cfi_rel_offset x5, 112
206    .cfi_rel_offset x6, 120
207
208    // x7, Callee-saves.
209    stp x7, x20, [sp, #128]
210    .cfi_rel_offset x7, 128
211    .cfi_rel_offset x20, 136
212
213    stp x21, x22, [sp, #144]
214    .cfi_rel_offset x21, 144
215    .cfi_rel_offset x22, 152
216
217    stp x23, x24, [sp, #160]
218    .cfi_rel_offset x23, 160
219    .cfi_rel_offset x24, 168
220
221    stp x25, x26, [sp, #176]
222    .cfi_rel_offset x25, 176
223    .cfi_rel_offset x26, 184
224
225    stp x27, x28, [sp, #192]
226    .cfi_rel_offset x27, 192
227    .cfi_rel_offset x28, 200
228
229    // x29(callee-save) and LR.
230    stp x29, xLR, [sp, #208]
231    .cfi_rel_offset x29, 208
232    .cfi_rel_offset x30, 216
233
234.endm
235
236    /*
237     * Macro that sets up the callee save frame to conform with
238     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
239     *
240     * TODO This is probably too conservative - saving FP & LR.
241     */
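    /*
     * Frame layout sketch for the 224-byte frame built by the _INTERNAL macro above (derived
     * from its stores):
     *   [#0] ArtMethod*, [#8] filler, [#16..#79] d0 - d7 (FP args),
     *   [#80..#127] x1 - x6, [#128] x7, [#136] x20, [#144..#207] x21 - x28,
     *   [#208] x29 (FP), [#216] x30 (LR)
     */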
242.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
243    adrp xIP0, :got:_ZN3art7Runtime9instance_E
244    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
245
246    // Our registers aren't intermixed - just spill in order.
247    ldr xIP0, [xIP0]  // xIP0 = Runtime::instance_ (an art::Runtime*).
248
249    // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsAndArgs].
250    ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
251
252    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
253
254    str xIP0, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
255    // Place sp in Thread::Current()->top_quick_frame.
256    mov xIP0, sp
257    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
258.endm
259
260.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
261    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
262    str x0, [sp, #0]  // Store ArtMethod* to bottom of stack.
263    // Place sp in Thread::Current()->top_quick_frame.
264    mov xIP0, sp
265    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
266.endm
267
268// TODO: Probably no need to restore registers preserved by aapcs64.
269.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
270    // FP args.
271    ldp d0, d1, [sp, #16]
272    ldp d2, d3, [sp, #32]
273    ldp d4, d5, [sp, #48]
274    ldp d6, d7, [sp, #64]
275
276    // Core args.
277    ldp x1, x2, [sp, #80]
278    .cfi_restore x1
279    .cfi_restore x2
280
281    ldp x3, x4, [sp, #96]
282    .cfi_restore x3
283    .cfi_restore x4
284
285    ldp x5, x6, [sp, #112]
286    .cfi_restore x5
287    .cfi_restore x6
288
289    // x7, Callee-saves.
290    ldp x7, x20, [sp, #128]
291    .cfi_restore x7
292    .cfi_restore x20
293
294    ldp x21, x22, [sp, #144]
295    .cfi_restore x21
296    .cfi_restore x22
297
298    ldp x23, x24, [sp, #160]
299    .cfi_restore x23
300    .cfi_restore x24
301
302    ldp x25, x26, [sp, #176]
303    .cfi_restore x25
304    .cfi_restore x26
305
306    ldp x27, x28, [sp, #192]
307    .cfi_restore x27
308    .cfi_restore x28
309
310    // x29(callee-save) and LR.
311    ldp x29, xLR, [sp, #208]
312    .cfi_restore x29
313    .cfi_restore x30
314
315    add sp, sp, #224
316    .cfi_adjust_cfa_offset -224
317.endm
318
319.macro RETURN_IF_RESULT_IS_ZERO
320    cbnz x0, 1f                // result non-zero branch over
321    ret                        // return
3221:
323.endm
324
325.macro RETURN_IF_RESULT_IS_NON_ZERO
326    cbz x0, 1f                 // result zero branch over
327    ret                        // return
3281:
329.endm
330
331    /*
332     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
333     * exception is Thread::Current()->exception_.
334     */
335.macro DELIVER_PENDING_EXCEPTION
336    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
337    mov x0, xSELF
338
339    // Point of no return.
340    b artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*)
341    brk 0  // Unreached
342.endm
343
344.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
345    ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET]   // Get exception field.
346    cbnz \reg, 1f
347    ret
3481:
349    DELIVER_PENDING_EXCEPTION
350.endm
351
352.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
353    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
354.endm
355
356// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
357.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
358    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
359.endm
360
361.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
362    cbnz w0, 1f                // result non-zero branch over
363    ret                        // return
3641:
365    DELIVER_PENDING_EXCEPTION
366.endm
367
368.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
369    .extern \cxx_name
370ENTRY \c_name
371    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
372    mov x0, xSELF                     // pass Thread::Current
373    b   \cxx_name                     // \cxx_name(Thread*)
374END \c_name
375.endm
376
377.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
378    .extern \cxx_name
379ENTRY \c_name
380    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context.
381    mov x1, xSELF                     // pass Thread::Current.
382    b   \cxx_name                     // \cxx_name(arg, Thread*).
383    brk 0
384END \c_name
385.endm
386
387.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
388    .extern \cxx_name
389ENTRY \c_name
390    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
391    mov x2, xSELF                     // pass Thread::Current
392    b   \cxx_name                     // \cxx_name(arg1, arg2, Thread*)
393    brk 0
394END \c_name
395.endm
396
397    /*
398     * Called by managed code, saves callee saves and then calls artDeliverExceptionFromCode,
399     * which will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
400     */
401ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
402
403    /*
404     * Called by managed code to create and deliver a NullPointerException.
405     */
406NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
407
408    /*
409     * Called by managed code to create and deliver an ArithmeticException.
410     */
411NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
412
413    /*
414     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
415     * index, arg2 holds limit.
416     */
417TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
418
419    /*
420     * Called by managed code to create and deliver a StackOverflowError.
421     */
422NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
423
424    /*
425     * Called by managed code to create and deliver a NoSuchMethodError.
426     */
427ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
428
429    /*
430     * All generated callsites for interface invokes and invocation slow paths will load arguments
431     * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
432     * the method_idx.  This wrapper will save arg1-arg3, and call the appropriate C helper.
433     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/x1.
434     *
435     * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
436     * of the target Method* in x0 and method->code_ in x1.
437     *
438     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
439     * thread and we branch to another stub to deliver it.
440     *
441     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
442     * pointing back to the original caller.
443     *
444     * Adapted from ARM32 code.
445     *
446     * Clobbers xIP0.
447     */
448.macro INVOKE_TRAMPOLINE_BODY cxx_name
449    .extern \cxx_name
450    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // save callee saves in case allocation triggers GC
451    // The helper signature is always
452    // (method_idx, this_object, Thread*, SP).
453
454    mov    x2, xSELF                      // pass Thread::Current
455    mov    x3, sp
456    bl     \cxx_name                      // (method_idx, this, Thread*, SP)
457    mov    xIP0, x1                       // save Method*->code_
458    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
459    cbz    x0, 1f                         // did we find the target? if not go to exception delivery
460    br     xIP0                           // tail call to target
4611:
462    DELIVER_PENDING_EXCEPTION
463.endm
464.macro INVOKE_TRAMPOLINE c_name, cxx_name
465ENTRY \c_name
466    INVOKE_TRAMPOLINE_BODY \cxx_name
467END \c_name
468.endm
469
470INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
471
472INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
473INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
474INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
475INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
476
477
478.macro INVOKE_STUB_CREATE_FRAME
479
480SAVE_SIZE=15*8   // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
481SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
482
483
484    mov x9, sp                             // Save stack pointer.
485    .cfi_register sp,x9
486
487    add x10, x2, # SAVE_SIZE_AND_METHOD    // calculate size of frame.
488    sub x10, sp, x10                       // Calculate SP position - saves + ArtMethod* + args
489    and x10, x10, # ~0xf                   // Enforce 16 byte stack alignment.
490    mov sp, x10                            // Set new SP.
491
492    sub x10, x9, #SAVE_SIZE                // Calculate new FP (later). Done here as we must move SP
493    .cfi_def_cfa_register x10              // before this.
494    .cfi_adjust_cfa_offset SAVE_SIZE
495
496    str x28, [x10, #112]
497    .cfi_rel_offset x28, 112
498
499    stp x26, x27, [x10, #96]
500    .cfi_rel_offset x26, 96
501    .cfi_rel_offset x27, 104
502
503    stp x24, x25, [x10, #80]
504    .cfi_rel_offset x24, 80
505    .cfi_rel_offset x25, 88
506
507    stp x22, x23, [x10, #64]
508    .cfi_rel_offset x22, 64
509    .cfi_rel_offset x23, 72
510
511    stp x20, x21, [x10, #48]
512    .cfi_rel_offset x20, 48
513    .cfi_rel_offset x21, 56
514
515    stp x9, x19, [x10, #32]                // Save old stack pointer and x19.
516    .cfi_rel_offset sp, 32
517    .cfi_rel_offset x19, 40
518
519    stp x4, x5, [x10, #16]                 // Save result and shorty addresses.
520    .cfi_rel_offset x4, 16
521    .cfi_rel_offset x5, 24
522
523    stp xFP, xLR, [x10]                    // Store LR & FP.
524    .cfi_rel_offset x29, 0
525    .cfi_rel_offset x30, 8
526
527    mov xFP, x10                           // Use xFP now, as it's callee-saved.
528    .cfi_def_cfa_register x29
529    mov xSELF, x3                          // Move thread pointer into SELF register.
530
531    // Copy arguments into stack frame.
532    // Use simple copy routine for now.
533    // 4 bytes per slot.
534    // X1 - source address
535    // W2 - args length
536    // X9 - destination address.
537    // W10 - temporary
538    add x9, sp, #8                         // Destination address is bottom of stack + null.
539
540    // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler
541    // does not have unique-id variables.
5421:
543    cmp w2, #0
544    beq 2f
545    sub w2, w2, #4      // Need 65536 bytes of range.
546    ldr w10, [x1, x2]
547    str w10, [x9, x2]
548
549    b 1b
550
5512:
552    // Store null into ArtMethod* at bottom of frame.
553    str xzr, [sp]
554.endm
555
556.macro INVOKE_STUB_CALL_AND_RETURN
557
558    // Load the method's quick code entry point.
559    ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
560    // Branch to method.
561    blr x9
562
563    // Restore return value address and shorty address.
564    ldp x4,x5, [xFP, #16]
565    .cfi_restore x4
566    .cfi_restore x5
567
568    ldr x28, [xFP, #112]
569    .cfi_restore x28
570
571    ldp x26, x27, [xFP, #96]
572    .cfi_restore x26
573    .cfi_restore x27
574
575    ldp x24, x25, [xFP, #80]
576    .cfi_restore x24
577    .cfi_restore x25
578
579    ldp x22, x23, [xFP, #64]
580    .cfi_restore x22
581    .cfi_restore x23
582
583    ldp x20, x21, [xFP, #48]
584    .cfi_restore x20
585    .cfi_restore x21
586
587    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
588    ldrb w10, [x5]
589
590    // Check the return type and store the correct register into the jvalue in memory.
591    // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables.
592
593    // Don't set anything for a void type.
594    cmp w10, #'V'
595    beq 3f
596
597    // Is it a double?
598    cmp w10, #'D'
599    bne 1f
600    str d0, [x4]
601    b 3f
602
6031:  // Is it a float?
604    cmp w10, #'F'
605    bne 2f
606    str s0, [x4]
607    b 3f
608
6092:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
610    str x0, [x4]
611
6123:  // Finish up.
613    ldp x2, x19, [xFP, #32]   // Restore stack pointer and x19.
614    .cfi_restore x19
615    mov sp, x2
616    .cfi_restore sp
617
618    ldp xFP, xLR, [xFP]    // Restore old frame pointer and link register.
619    .cfi_restore x29
620    .cfi_restore x30
621
622    ret
623
624.endm
625
626
627/*
628 *  extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
629 *                                       uint32_t  *args,     x1
630 *                                       uint32_t argsize,    w2
631 *                                       Thread *self,        x3
632 *                                       JValue *result,      x4
633 *                                       char   *shorty);     x5
634 *  +----------------------+
635 *  |                      |
636 *  |  C/C++ frame         |
637 *  |       LR''           |
638 *  |       FP''           | <- SP'
639 *  +----------------------+
640 *  +----------------------+
641 *  |        x28           | <- TODO: Remove callee-saves.
642 *  |         :            |
643 *  |        x19           |
644 *  |        SP'           |
645 *  |        X5            |
646 *  |        X4            |        Saved registers
647 *  |        LR'           |
648 *  |        FP'           | <- FP
649 *  +----------------------+
650 *  | uint32_t out[n-1]    |
651 *  |    :      :          |        Outs
652 *  | uint32_t out[0]      |
653 *  | ArtMethod*           | <- SP  value=null
654 *  +----------------------+
655 *
656 * Outgoing registers:
657 *  x0    - Method*
658 *  x1-x7 - integer parameters.
659 *  d0-d7 - Floating point parameters.
660 *  xSELF = self
661 *  SP = & of ArtMethod*
662 *  x1 = "this" pointer.
663 *
664 */
665ENTRY art_quick_invoke_stub
666    // Spill registers as per AAPCS64 calling convention.
667    INVOKE_STUB_CREATE_FRAME
668
669    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
670    // Parse the passed shorty to determine which register to load.
671    // Load addresses for routines that load WXSD registers.
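    // Illustrative example (not from the original source): for a virtual method with shorty
    // "DIJF" (returns double, takes int, long, float), the loop below skips the return char,
    // then routes the int to w2, the long to x3 and the float to s0; "this" is already in w1.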
672    adr  x11, .LstoreW2
673    adr  x12, .LstoreX2
674    adr  x13, .LstoreS0
675    adr  x14, .LstoreD0
676
677    // Initialize routine offsets to 0 for integers and floats.
678    // x8 for integers, x15 for floating point.
679    mov x8, #0
680    mov x15, #0
681
682    add x10, x5, #1         // Load shorty address, plus one to skip return value.
683    ldr w1, [x9],#4         // Load "this" parameter, and increment arg pointer.
684
685    // Loop to fill registers.
686.LfillRegisters:
687    ldrb w17, [x10], #1       // Load next character in signature, and increment.
688    cbz w17, .LcallFunction   // Exit at end of signature. Shorty 0 terminated.
689
690    cmp  w17, #'F' // is this a float?
691    bne .LisDouble
692
693    cmp x15, # 8*12         // Skip this load if all registers full.
694    beq .Ladvance4
695
696    add x17, x13, x15       // Calculate subroutine to jump to.
697    br  x17
698
699.LisDouble:
700    cmp w17, #'D'           // is this a double?
701    bne .LisLong
702
703    cmp x15, # 8*12         // Skip this load if all registers full.
704    beq .Ladvance8
705
706    add x17, x14, x15       // Calculate subroutine to jump to.
707    br x17
708
709.LisLong:
710    cmp w17, #'J'           // is this a long?
711    bne .LisOther
712
713    cmp x8, # 6*12          // Skip this load if all registers full.
714    beq .Ladvance8
715
716    add x17, x12, x8        // Calculate subroutine to jump to.
717    br x17
718
719.LisOther:                  // Everything else takes one vReg.
720    cmp x8, # 6*12          // Skip this load if all registers full.
721    beq .Ladvance4
722
723    add x17, x11, x8        // Calculate subroutine to jump to.
724    br x17
725
726.Ladvance4:
727    add x9, x9, #4
728    b .LfillRegisters
729
730.Ladvance8:
731    add x9, x9, #8
732    b .LfillRegisters
733
734// Macro for loading a parameter into a register.
735//  counter - the register with offset into these tables
736//  size - the size of the register - 4 or 8 bytes.
737//  register - the name of the register to be loaded.
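//  return - the label to branch back to once the register has been loaded.
//
// For example, "LOADREG x8 4 w2 .LfillRegisters" expands to:
//     ldr w2, [x9], #4
//     add x8, x8, 12
//     b   .LfillRegisters
// Each expansion is three instructions (12 bytes), which is why the counter register that
// indexes these jump tables advances by 12 per argument.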
738.macro LOADREG counter size register return
739    ldr \register , [x9], #\size
740    add \counter, \counter, 12
741    b \return
742.endm
743
744// Store ints.
745.LstoreW2:
746    LOADREG x8 4 w2 .LfillRegisters
747    LOADREG x8 4 w3 .LfillRegisters
748    LOADREG x8 4 w4 .LfillRegisters
749    LOADREG x8 4 w5 .LfillRegisters
750    LOADREG x8 4 w6 .LfillRegisters
751    LOADREG x8 4 w7 .LfillRegisters
752
753// Store longs.
754.LstoreX2:
755    LOADREG x8 8 x2 .LfillRegisters
756    LOADREG x8 8 x3 .LfillRegisters
757    LOADREG x8 8 x4 .LfillRegisters
758    LOADREG x8 8 x5 .LfillRegisters
759    LOADREG x8 8 x6 .LfillRegisters
760    LOADREG x8 8 x7 .LfillRegisters
761
762// Store singles.
763.LstoreS0:
764    LOADREG x15 4 s0 .LfillRegisters
765    LOADREG x15 4 s1 .LfillRegisters
766    LOADREG x15 4 s2 .LfillRegisters
767    LOADREG x15 4 s3 .LfillRegisters
768    LOADREG x15 4 s4 .LfillRegisters
769    LOADREG x15 4 s5 .LfillRegisters
770    LOADREG x15 4 s6 .LfillRegisters
771    LOADREG x15 4 s7 .LfillRegisters
772
773// Store doubles.
774.LstoreD0:
775    LOADREG x15 8 d0 .LfillRegisters
776    LOADREG x15 8 d1 .LfillRegisters
777    LOADREG x15 8 d2 .LfillRegisters
778    LOADREG x15 8 d3 .LfillRegisters
779    LOADREG x15 8 d4 .LfillRegisters
780    LOADREG x15 8 d5 .LfillRegisters
781    LOADREG x15 8 d6 .LfillRegisters
782    LOADREG x15 8 d7 .LfillRegisters
783
784
785.LcallFunction:
786
787    INVOKE_STUB_CALL_AND_RETURN
788
789END art_quick_invoke_stub
790
791/*  extern"C"
792 *     void art_quick_invoke_static_stub(ArtMethod *method,   x0
793 *                                       uint32_t  *args,     x1
794 *                                       uint32_t argsize,    w2
795 *                                       Thread *self,        x3
796 *                                       JValue *result,      x4
797 *                                       char   *shorty);     x5
798 */
799ENTRY art_quick_invoke_static_stub
800    // Spill registers as per AAPCS64 calling convention.
801    INVOKE_STUB_CREATE_FRAME
802
803    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
804    // Parse the passed shorty to determine which register to load.
805    // Load addresses for routines that load WXSD registers.
806    adr  x11, .LstoreW1_2
807    adr  x12, .LstoreX1_2
808    adr  x13, .LstoreS0_2
809    adr  x14, .LstoreD0_2
810
811    // Initialize routine offsets to 0 for integers and floats.
812    // x8 for integers, x15 for floating point.
813    mov x8, #0
814    mov x15, #0
815
816    add x10, x5, #1     // Load shorty address, plus one to skip return value.
817
818    // Loop to fill registers.
819.LfillRegisters2:
820    ldrb w17, [x10], #1         // Load next character in signature, and increment.
821    cbz w17, .LcallFunction2    // Exit at end of signature. Shorty 0 terminated.
822
823    cmp  w17, #'F'          // is this a float?
824    bne .LisDouble2
825
826    cmp x15, # 8*12         // Skip this load if all registers full.
827    beq .Ladvance4_2
828
829    add x17, x13, x15       // Calculate subroutine to jump to.
830    br  x17
831
832.LisDouble2:
833    cmp w17, #'D'           // is this a double?
834    bne .LisLong2
835
836    cmp x15, # 8*12         // Skip this load if all registers full.
837    beq .Ladvance8_2
838
839    add x17, x14, x15       // Calculate subroutine to jump to.
840    br x17
841
842.LisLong2:
843    cmp w17, #'J'           // is this a long?
844    bne .LisOther2
845
846    cmp x8, # 7*12          // Skip this load if all registers full.
847    beq .Ladvance8_2
848
849    add x17, x12, x8        // Calculate subroutine to jump to.
850    br x17
851
852.LisOther2:                 // Everything else takes one vReg.
853    cmp x8, # 7*12          // Skip this load if all registers full.
854    beq .Ladvance4_2
855
856    add x17, x11, x8        // Calculate subroutine to jump to.
857    br x17
858
859.Ladvance4_2:
860    add x9, x9, #4
861    b .LfillRegisters2
862
863.Ladvance8_2:
864    add x9, x9, #8
865    b .LfillRegisters2
866
867// Store ints.
868.LstoreW1_2:
869    LOADREG x8 4 w1 .LfillRegisters2
870    LOADREG x8 4 w2 .LfillRegisters2
871    LOADREG x8 4 w3 .LfillRegisters2
872    LOADREG x8 4 w4 .LfillRegisters2
873    LOADREG x8 4 w5 .LfillRegisters2
874    LOADREG x8 4 w6 .LfillRegisters2
875    LOADREG x8 4 w7 .LfillRegisters2
876
877// Store longs.
878.LstoreX1_2:
879    LOADREG x8 8 x1 .LfillRegisters2
880    LOADREG x8 8 x2 .LfillRegisters2
881    LOADREG x8 8 x3 .LfillRegisters2
882    LOADREG x8 8 x4 .LfillRegisters2
883    LOADREG x8 8 x5 .LfillRegisters2
884    LOADREG x8 8 x6 .LfillRegisters2
885    LOADREG x8 8 x7 .LfillRegisters2
886
887// Store singles.
888.LstoreS0_2:
889    LOADREG x15 4 s0 .LfillRegisters2
890    LOADREG x15 4 s1 .LfillRegisters2
891    LOADREG x15 4 s2 .LfillRegisters2
892    LOADREG x15 4 s3 .LfillRegisters2
893    LOADREG x15 4 s4 .LfillRegisters2
894    LOADREG x15 4 s5 .LfillRegisters2
895    LOADREG x15 4 s6 .LfillRegisters2
896    LOADREG x15 4 s7 .LfillRegisters2
897
898// Store doubles.
899.LstoreD0_2:
900    LOADREG x15 8 d0 .LfillRegisters2
901    LOADREG x15 8 d1 .LfillRegisters2
902    LOADREG x15 8 d2 .LfillRegisters2
903    LOADREG x15 8 d3 .LfillRegisters2
904    LOADREG x15 8 d4 .LfillRegisters2
905    LOADREG x15 8 d5 .LfillRegisters2
906    LOADREG x15 8 d6 .LfillRegisters2
907    LOADREG x15 8 d7 .LfillRegisters2
908
909
910.LcallFunction2:
911
912    INVOKE_STUB_CALL_AND_RETURN
913
914END art_quick_invoke_static_stub
915
916
917
918/*  extern"C" void art_quick_osr_stub(void** stack,                x0
919 *                                    size_t stack_size_in_bytes,  x1
920 *                                    const uint8_t* native_pc,    x2
921 *                                    JValue *result,              x3
922 *                                    char   *shorty,              x4
923 *                                    Thread *self)                x5
924 */
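/*
 * Rough control flow (as implemented below): the stub saves callee-saves plus a null ArtMethod*
 * slot, then uses "bl .Losr_entry" so that lr points back into this stub. .Losr_entry carves
 * stack_size_in_bytes off the stack, plants lr in the slot the OSR-compiled code expects,
 * copies the passed stack image 4 bytes at a time and finally "br x2" jumps to native_pc. When
 * the compiled method returns it lands just after the bl, where the result is stored according
 * to the shorty.
 */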
925ENTRY art_quick_osr_stub
926SAVE_SIZE=15*8   // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
927    mov x9, sp                             // Save stack pointer.
928    .cfi_register sp,x9
929
930    sub x10, sp, # SAVE_SIZE
931    and x10, x10, # ~0xf                   // Enforce 16 byte stack alignment.
932    mov sp, x10                            // Set new SP.
933
934    str x28, [sp, #112]
935    stp x26, x27, [sp, #96]
936    stp x24, x25, [sp, #80]
937    stp x22, x23, [sp, #64]
938    stp x20, x21, [sp, #48]
939    stp x9, x19, [sp, #32]                // Save old stack pointer and x19.
940    stp x3, x4, [sp, #16]                 // Save result and shorty addresses.
941    stp xFP, xLR, [sp]                    // Store LR & FP.
942    mov xSELF, x5                         // Move thread pointer into SELF register.
943
944    sub sp, sp, #16
945    str xzr, [sp]                         // Store null for ArtMethod* slot
946    // Branch to stub.
947    bl .Losr_entry
948    add sp, sp, #16
949
950    // Restore return value address and shorty address.
951    ldp x3,x4, [sp, #16]
952    ldr x28, [sp, #112]
953    ldp x26, x27, [sp, #96]
954    ldp x24, x25, [sp, #80]
955    ldp x22, x23, [sp, #64]
956    ldp x20, x21, [sp, #48]
957
958    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
959    ldrb w10, [x4]
960
961    // Check the return type and store the correct register into the jvalue in memory.
962
963    // Don't set anything for a void type.
964    cmp w10, #'V'
965    beq .Losr_exit
966
967    // Is it a double?
968    cmp w10, #'D'
969    bne .Lno_double
970    str d0, [x3]
971    b .Losr_exit
972
973.Lno_double:  // Is it a float?
974    cmp w10, #'F'
975    bne .Lno_float
976    str s0, [x3]
977    b .Losr_exit
978
979.Lno_float:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
980    str x0, [x3]
981
982.Losr_exit:  // Finish up.
983    ldp x2, x19, [sp, #32]   // Restore stack pointer and x19.
984    ldp xFP, xLR, [sp]    // Restore old frame pointer and link register.
985    mov sp, x2
986    ret
987
988.Losr_entry:
989    // Update stack pointer for the callee
990    sub sp, sp, x1
991
992    // Update link register slot expected by the callee.
993    sub w1, w1, #8
994    str lr, [sp, x1]
995
996    // Copy arguments into stack frame.
997    // Use simple copy routine for now.
998    // 4 bytes per slot.
999    // X0 - source address
1000    // W1 - args length
1001    // SP - destination address.
1002    // W10 - temporary
1003.Losr_loop_entry:
1004    cmp w1, #0
1005    beq .Losr_loop_exit
1006    sub w1, w1, #4
1007    ldr w10, [x0, x1]
1008    str w10, [sp, x1]
1009    b .Losr_loop_entry
1010
1011.Losr_loop_exit:
1012    // Branch to the OSR entry point.
1013    br x2
1014
1015END art_quick_osr_stub
1016
1017    /*
1018     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_
1019     */
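    /*
     * Context layout assumed by the loads below (one 8-byte slot per register):
     * gprs_[0..30] = x0..x30, gprs_[31] = SP, gprs_[32] = unused (XZR), gprs_[33] = PC,
     * and fprs_[0..31] = d0..d31.
     */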
1020
1021ENTRY art_quick_do_long_jump
1022    // Load FPRs
1023    ldp d0, d1, [x1], #16
1024    ldp d2, d3, [x1], #16
1025    ldp d4, d5, [x1], #16
1026    ldp d6, d7, [x1], #16
1027    ldp d8, d9, [x1], #16
1028    ldp d10, d11, [x1], #16
1029    ldp d12, d13, [x1], #16
1030    ldp d14, d15, [x1], #16
1031    ldp d16, d17, [x1], #16
1032    ldp d18, d19, [x1], #16
1033    ldp d20, d21, [x1], #16
1034    ldp d22, d23, [x1], #16
1035    ldp d24, d25, [x1], #16
1036    ldp d26, d27, [x1], #16
1037    ldp d28, d29, [x1], #16
1038    ldp d30, d31, [x1]
1039
1040    // Load GPRs
1041    // TODO: lots of those are smashed, could optimize.
1042    add x0, x0, #30*8
1043    ldp x30, x1, [x0], #-16          // LR & SP
1044    ldp x28, x29, [x0], #-16
1045    ldp x26, x27, [x0], #-16
1046    ldp x24, x25, [x0], #-16
1047    ldp x22, x23, [x0], #-16
1048    ldp x20, x21, [x0], #-16
1049    ldp x18, x19, [x0], #-16
1050    ldp x16, x17, [x0], #-16
1051    ldp x14, x15, [x0], #-16
1052    ldp x12, x13, [x0], #-16
1053    ldp x10, x11, [x0], #-16
1054    ldp x8, x9, [x0], #-16
1055    ldp x6, x7, [x0], #-16
1056    ldp x4, x5, [x0], #-16
1057    ldp x2, x3, [x0], #-16
1058    mov sp, x1
1059
1060    // Need to load PC, it's at the end (after the space for the unused XZR). Use x1.
1061    ldr x1, [x0, #33*8]
1062    // And the value of x0.
1063    ldr x0, [x0]
1064
1065    br  x1
1066END art_quick_do_long_jump
1067
1068    /*
1069     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
1070     * possibly null object to lock.
1071     *
1072     * Derived from arm32 code.
1073     */
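    /*
     * Thin lock word layout, as manipulated below (see lock_word.h for the authoritative
     * definition): the low 16 bits hold the owner thread id, the bits above them hold the
     * recursion count (incremented by LOCK_WORD_THIN_LOCK_COUNT_ONE), bits 28-29 hold the read
     * barrier state, and the topmost state bits (checked via LOCK_WORD_STATE_SHIFT) are
     * non-zero for fat locks and hash codes.
     */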
1074    .extern artLockObjectFromCode
1075ENTRY art_quick_lock_object
1076    cbz    w0, .Lslow_lock
1077    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
1078.Lretry_lock:
1079    ldr    w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
1080    ldxr   w1, [x4]
1081    mov    x3, x1
1082    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
1083    cbnz   w3, .Lnot_unlocked         // already thin locked
1084    // unlocked case - x1: original lock word that's zero except for the read barrier bits.
1085    orr    x2, x1, x2                 // x2 holds thread id with count of 0 with preserved read barrier bits
1086    stxr   w3, w2, [x4]
1087    cbnz   w3, .Llock_stxr_fail       // store failed, retry
1088    dmb    ishld                      // acquire (LoadLoad|LoadStore) memory barrier
1089    ret
1090.Lnot_unlocked:  // x1: original lock word
1091    lsr    w3, w1, LOCK_WORD_STATE_SHIFT
1092    cbnz   w3, .Lslow_lock            // if either of the top two bits are set, go slow path
1093    eor    w2, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
1094    uxth   w2, w2                     // zero top 16 bits
1095    cbnz   w2, .Lslow_lock            // thread ids don't match -> contention, go to slow path
1096                                      // else they match -> recursive lock, check count overflow below
1097    mov    x3, x1                     // copy the lock word to check count overflow.
1098    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits.
1099    add    w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count in lock word placing in w2 to check overflow
1100    lsr    w3, w2, LOCK_WORD_READ_BARRIER_STATE_SHIFT  // if either of the upper two bits (28-29) are set, we overflowed.
1101    cbnz   w3, .Lslow_lock            // if we overflow the count go slow path
1102    add    w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count for real
1103    stxr   w3, w2, [x4]
1104    cbnz   w3, .Llock_stxr_fail       // store failed, retry
1105    ret
1106.Llock_stxr_fail:
1107    b      .Lretry_lock               // retry
1108.Lslow_lock:
1109    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case we block
1110    mov    x1, xSELF                  // pass Thread::Current
1111    bl     artLockObjectFromCode      // (Object* obj, Thread*)
1112    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1113    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1114END art_quick_lock_object
1115
1116ENTRY art_quick_lock_object_no_inline
1117    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case we block
1118    mov    x1, xSELF                  // pass Thread::Current
1119    bl     artLockObjectFromCode      // (Object* obj, Thread*)
1120    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1121    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1122END art_quick_lock_object_no_inline
1123
1124    /*
1125     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
1126     * x0 holds the possibly null object to unlock.
1127     *
1128     * Derived from arm32 code.
1129     */
1130    .extern artUnlockObjectFromCode
1131ENTRY art_quick_unlock_object
1132    cbz    x0, .Lslow_unlock
1133    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
1134.Lretry_unlock:
1135#ifndef USE_READ_BARRIER
1136    ldr    w1, [x4]
1137#else
1138    ldxr   w1, [x4]                   // Need to use atomic instructions for read barrier
1139#endif
1140    lsr    w2, w1, LOCK_WORD_STATE_SHIFT
1141    cbnz   w2, .Lslow_unlock          // if either of the top two bits are set, go slow path
1142    ldr    w2, [xSELF, #THREAD_ID_OFFSET]
1143    mov    x3, x1                     // copy lock word to check thread id equality
1144    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
1145    eor    w3, w3, w2                 // lock_word.ThreadId() ^ self->ThreadId()
1146    uxth   w3, w3                     // zero top 16 bits
1147    cbnz   w3, .Lslow_unlock          // if lock word and self thread ids don't match, go slow path
1148    mov    x3, x1                     // copy lock word to detect transition to unlocked
1149    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED  // zero the read barrier bits
1150    cmp    w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
1151    bpl    .Lrecursive_thin_unlock
1152    // transition to unlocked
1153    mov    x3, x1
1154    and    w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK  // w3: zero except for the preserved read barrier bits
1155    dmb    ish                        // full (LoadStore|StoreStore) memory barrier
1156#ifndef USE_READ_BARRIER
1157    str    w3, [x4]
1158#else
1159    stxr   w2, w3, [x4]               // Need to use atomic instructions for read barrier
1160    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
1161#endif
1162    ret
1163.Lrecursive_thin_unlock:  // w1: original lock word
1164    sub    w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // decrement count
1165#ifndef USE_READ_BARRIER
1166    str    w1, [x4]
1167#else
1168    stxr   w2, w1, [x4]               // Need to use atomic instructions for read barrier
1169    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
1170#endif
1171    ret
1172.Lunlock_stxr_fail:
1173    b      .Lretry_unlock               // retry
1174.Lslow_unlock:
1175    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case exception allocation triggers GC
1176    mov    x1, xSELF                  // pass Thread::Current
1177    bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
1178    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1179    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1180END art_quick_unlock_object
1181
1182ENTRY art_quick_unlock_object_no_inline
1183    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case exception allocation triggers GC
1184    mov    x1, xSELF                  // pass Thread::Current
1185    bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
1186    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1187    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1188END art_quick_unlock_object_no_inline
1189
1190    /*
1191     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
1192     * artThrowClassCastException.
1193     */
1194    .extern artThrowClassCastException
1195ENTRY art_quick_check_cast
1196    // Store arguments and link register
1197    // Stack needs to be 16B aligned on calls.
1198    stp x0, x1, [sp,#-32]!
1199    .cfi_adjust_cfa_offset 32
1200    .cfi_rel_offset x0, 0
1201    .cfi_rel_offset x1, 8
1202    str xLR, [sp, #24]
1203    .cfi_rel_offset x30, 24
1204
1205    // Call runtime code
1206    bl artIsAssignableFromCode
1207
1208    // Check for exception
1209    cbz x0, .Lthrow_class_cast_exception
1210
1211    // Restore and return
1212    ldr xLR, [sp, #24]
1213    .cfi_restore x30
1214    ldp x0, x1, [sp], #32
1215    .cfi_restore x0
1216    .cfi_restore x1
1217    .cfi_adjust_cfa_offset -32
1218    ret
1219
1220    .cfi_adjust_cfa_offset 32         // Reset unwind info so following code unwinds.
1221
1222.Lthrow_class_cast_exception:
1223    // Restore
1224    ldr xLR, [sp, #24]
1225    .cfi_restore x30
1226    ldp x0, x1, [sp], #32
1227    .cfi_restore x0
1228    .cfi_restore x1
1229    .cfi_adjust_cfa_offset -32
1230
1231    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
1232    mov x2, xSELF                     // pass Thread::Current
1233    b artThrowClassCastException      // (Class*, Class*, Thread*)
1234    brk 0                             // We should not return here...
1235END art_quick_check_cast
1236
1237// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
1238.macro POP_REG_NE xReg, offset, xExclude
1239    .ifnc \xReg, \xExclude
1240        ldr \xReg, [sp, #\offset]     // restore xReg
1241        .cfi_restore \xReg
1242    .endif
1243.endm
1244
1245    /*
1246     * Macro to insert read barrier, only used in art_quick_aput_obj.
1247     * xDest, wDest and xObj are registers, offset is a defined literal such as
1248     * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
1249     * name mismatch between instructions. This macro uses the lower 32b of register when possible.
1250     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
1251     */
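    /*
     * Example use (as in art_quick_aput_obj below):
     *   READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET
     * loads the 32-bit class reference of the object in x0 into w3 (zero-extending into x3),
     * going through artReadBarrierSlow when USE_READ_BARRIER is defined.
     */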
1252.macro READ_BARRIER xDest, wDest, xObj, offset
1253#ifdef USE_READ_BARRIER
1254    // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
1255    stp x0, x1, [sp, #-48]!
1256    .cfi_adjust_cfa_offset 48
1257    .cfi_rel_offset x0, 0
1258    .cfi_rel_offset x1, 8
1259    stp x2, x3, [sp, #16]
1260    .cfi_rel_offset x2, 16
1261    .cfi_rel_offset x3, 24
1262    stp x4, xLR, [sp, #32]
1263    .cfi_rel_offset x4, 32
1264    .cfi_rel_offset x30, 40
1265
1266    // mov x0, \xRef                // pass ref in x0 (no-op for now since parameter ref is unused)
1267    .ifnc \xObj, x1
1268        mov x1, \xObj               // pass xObj
1269    .endif
1270    mov w2, #\offset                // pass offset
1271    bl artReadBarrierSlow           // artReadBarrierSlow(ref, xObj, offset)
1272    // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning.
1273    .ifnc \wDest, w0
1274        mov \wDest, w0              // save return value in wDest
1275    .endif
1276
1277    // Conditionally restore saved registers
1278    POP_REG_NE x0, 0, \xDest
1279    POP_REG_NE x1, 8, \xDest
1280    POP_REG_NE x2, 16, \xDest
1281    POP_REG_NE x3, 24, \xDest
1282    POP_REG_NE x4, 32, \xDest
1283    ldr xLR, [sp, #40]
1284    .cfi_restore x30
1285    add sp, sp, #48
1286    .cfi_adjust_cfa_offset -48
1287#else
1288    ldr \wDest, [\xObj, #\offset]   // Heap reference = 32b. This also zero-extends to \xDest.
1289    UNPOISON_HEAP_REF \wDest
1290#endif  // USE_READ_BARRIER
1291.endm
1292
1293    /*
1294     * Entry from managed code for array put operations of objects where the value being stored
1295     * needs to be checked for compatibility.
1296     * x0 = array, x1 = index, x2 = value
1297     *
1298     * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We
1299     * assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by
1300     * using index-zero-extension in load/stores.
1301     *
1302     * Temporaries: x3, x4
1303     * TODO: x4 OK? ip seems wrong here.
1304     */
1305ENTRY art_quick_aput_obj_with_null_and_bound_check
1306    tst x0, x0
1307    bne art_quick_aput_obj_with_bound_check
1308    b art_quick_throw_null_pointer_exception
1309END art_quick_aput_obj_with_null_and_bound_check
1310
1311ENTRY art_quick_aput_obj_with_bound_check
1312    ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]
1313    cmp w3, w1
1314    bhi art_quick_aput_obj
1315    mov x0, x1
1316    mov x1, x3
1317    b art_quick_throw_array_bounds
1318END art_quick_aput_obj_with_bound_check
1319
1320#ifdef USE_READ_BARRIER
1321    .extern artReadBarrierSlow
1322#endif
1323ENTRY art_quick_aput_obj
1324    cbz x2, .Ldo_aput_null
1325    READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET     // Heap reference = 32b
1326                                                         // This also zero-extends to x3
1327    READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET     // Heap reference = 32b
1328                                                         // This also zero-extends to x4
1329    READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b
1330                                                         // This also zero-extends to x3
1331    cmp w3, w4  // value's type == array's component type - trivial assignability
1332    bne .Lcheck_assignability
1333.Ldo_aput:
1334    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
1335                                                         // "Compress" = do nothing
1336    POISON_HEAP_REF w2
1337    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
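    // Mark the GC card for the array (assumed ART card-marking idiom): dirty the byte at
    // card_table_base + (array_address >> 7) by storing the low byte of the card table base,
    // which doubles as the dirty value.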
1338    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
1339    lsr x0, x0, #7
1340    strb w3, [x3, x0]
1341    ret
1342.Ldo_aput_null:
1343    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
1344                                                         // "Compress" = do nothing
1345    str w2, [x3, x1, lsl #2]                             // Heap reference = 32b
1346    ret
1347.Lcheck_assignability:
1348    // Store arguments and link register
1349    stp x0, x1, [sp,#-32]!
1350    .cfi_adjust_cfa_offset 32
1351    .cfi_rel_offset x0, 0
1352    .cfi_rel_offset x1, 8
1353    stp x2, xLR, [sp, #16]
1354    .cfi_rel_offset x2, 16
1355    .cfi_rel_offset x30, 24
1356
1357    // Call runtime code
1358    mov x0, x3              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
1359    mov x1, x4              // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
1360    bl artIsAssignableFromCode
1361
1362    // Check for exception
1363    cbz x0, .Lthrow_array_store_exception
1364
1365    // Restore
1366    ldp x2, x30, [sp, #16]
1367    .cfi_restore x2
1368    .cfi_restore x30
1369    ldp x0, x1, [sp], #32
1370    .cfi_restore x0
1371    .cfi_restore x1
1372    .cfi_adjust_cfa_offset -32
1373
1374    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
1375                                                          // "Compress" = do nothing
1376    POISON_HEAP_REF w2
1377    str w2, [x3, x1, lsl #2]                              // Heap reference = 32b
1378    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
1379    lsr x0, x0, #7
1380    strb w3, [x3, x0]
1381    ret
1382    .cfi_adjust_cfa_offset 32  // 4 restores after cbz for unwinding.
1383.Lthrow_array_store_exception:
1384    ldp x2, x30, [sp, #16]
1385    .cfi_restore x2
1386    .cfi_restore x30
1387    ldp x0, x1, [sp], #32
1388    .cfi_restore x0
1389    .cfi_restore x1
1390    .cfi_adjust_cfa_offset -32
1391
1392    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
1393    mov x1, x2                    // Pass value.
1394    mov x2, xSELF                 // Pass Thread::Current.
1395    b artThrowArrayStoreException // (Object*, Object*, Thread*).
1396    brk 0                         // Unreached.
1397END art_quick_aput_obj
1398
1399// Macro to facilitate adding new allocation entrypoints.
1400.macro ONE_ARG_DOWNCALL name, entrypoint, return
1401    .extern \entrypoint
1402ENTRY \name
1403    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
1404    mov    x1, xSELF                  // pass Thread::Current
1405    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
1406    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1407    \return
1408END \name
1409.endm
1410
1411// Macro to facilitate adding new allocation entrypoints.
1412.macro TWO_ARG_DOWNCALL name, entrypoint, return
1413    .extern \entrypoint
1414ENTRY \name
1415    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
1416    mov    x2, xSELF                  // pass Thread::Current
1417    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
1418    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1419    \return
1420END \name
1421.endm
1422
1423// Macro to facilitate adding new allocation entrypoints.
1424.macro THREE_ARG_DOWNCALL name, entrypoint, return
1425    .extern \entrypoint
1426ENTRY \name
1427    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
1428    mov    x3, xSELF                  // pass Thread::Current
1429    bl     \entrypoint
1430    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1431    \return
1432END \name
1433.endm
1434
1435// Macro to facilitate adding new allocation entrypoints.
1436.macro FOUR_ARG_DOWNCALL name, entrypoint, return
1437    .extern \entrypoint
1438ENTRY \name
1439    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
1440    mov    x4, xSELF                  // pass Thread::Current
1441    bl     \entrypoint                //
1442    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1443    \return
1444    DELIVER_PENDING_EXCEPTION
1445END \name
1446.endm
1447
1448// Macros that take advantage of code similarities for downcalls with a referrer.
1449.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
1450    .extern \entrypoint
1451ENTRY \name
1452    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1453    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1454    mov    x2, xSELF                  // pass Thread::Current
1455    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*, SP)
1456    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1457    \return
1458END \name
1459.endm
1460
1461.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
1462    .extern \entrypoint
1463ENTRY \name
1464    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1465    ldr    x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1466    mov    x3, xSELF                  // pass Thread::Current
1467    bl     \entrypoint
1468    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1469    \return
1470END \name
1471.endm
1472
1473.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
1474    .extern \entrypoint
1475ENTRY \name
1476    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1477    ldr    x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1478    mov    x4, xSELF                  // pass Thread::Current
1479    bl     \entrypoint
1480    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1481    \return
1482END \name
1483.endm
1484
1485.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1486    cbz w0, 1f                 // result zero branch over
1487    ret                        // return
14881:
1489    DELIVER_PENDING_EXCEPTION
1490.endm
1491
1492    /*
1493     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
1494     * failure.
1495     */
1496TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1497
1498    /*
1499     * Entry from managed code when static storage is uninitialized; this stub will run the
1500     * class initializer and deliver an exception on error. On success the static storage base
1501     * is returned.
1502     */
1503ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1504
1505ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1506ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1507
1508ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1509ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1510ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1511ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1512ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1513ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1514ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1515
1516TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1517TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1518TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1519TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1520TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1521TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1522TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
1523
1524TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1525TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1526TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1527TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1528
1529THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1530THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1531THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1532THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1533THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
1534
1535// This is separated out as the argument order is different.
1536    .extern artSet64StaticFromCode
1537ENTRY art_quick_set64_static
1538    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
1539    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
1540                                      // x2 contains the parameter
1541    mov    x3, xSELF                  // pass Thread::Current
1542    bl     artSet64StaticFromCode
1543    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1544    RETURN_IF_W0_IS_ZERO_OR_DELIVER
1545END art_quick_set64_static
1546
1547    /*
1548     * Entry from managed code to resolve a string; this stub will allocate a String and deliver
1549     * an exception on error. On success the String is returned. w0 holds the string index. The
1550     * fast path check for a hit in the strings cache has already been performed.
1551     */
1552ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1553
1554// Generate the allocation entrypoints for each allocator.
1555GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
1556
1557// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
1558ENTRY art_quick_alloc_object_rosalloc
1559    // Fast path rosalloc allocation.
1560    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
1561    // x2-x7: free.
1562    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
1563                                                              // Load the class (x2)
1564    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
1565    cbz    x2, .Lart_quick_alloc_object_rosalloc_slow_path    // Check null class
1566                                                              // Check class status.
1567    ldr    w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
1568    cmp    x3, #MIRROR_CLASS_STATUS_INITIALIZED
1569    bne    .Lart_quick_alloc_object_rosalloc_slow_path
1570                                                              // Add a fake dependence from the
1571                                                              // following access flag and size
1572                                                              // loads to the status load.
1573                                                              // This is to prevent those loads
1574                                                              // from being reordered above the
1575                                                              // status load and reading wrong
1576                                                              // values (an alternative is to use
1577                                                              // a load-acquire for the status).
1578    eor    x3, x3, x3
1579    add    x2, x2, x3
1580                                                              // Check whether the access flags have
1581                                                              // kAccClassIsFinalizable set.
1582    ldr    w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
1583    tst    x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
1584    bne    .Lart_quick_alloc_object_rosalloc_slow_path
1585    ldr    x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]  // Check if the thread local
1586                                                              // allocation stack has room.
1587                                                              // ldp won't work due to large offset.
1588    ldr    x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
1589    cmp    x3, x4
1590    bhs    .Lart_quick_alloc_object_rosalloc_slow_path
1591    ldr    w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET]         // Load the object size (x3)
1592    cmp    x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE        // Check if the size is for a thread
1593                                                              // local allocation
1594    bhs    .Lart_quick_alloc_object_rosalloc_slow_path
1595                                                              // Compute the rosalloc bracket index
1596                                                              // from the size.
1597                                                              // Align up the size by the rosalloc
1598                                                              // bracket quantum size and divide
1599                                                              // by the quantum size, and subtract
1600                                                              // 1. This code is a shorter but
1601                                                              // equivalent version.
1602    sub    x3, x3, #1
1603    lsr    x3, x3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
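    // Illustrative check (assuming a 16-byte bracket quantum, i.e.
    // ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT == 4):
    //   index = RoundUp(size, 16) / 16 - 1  ==  (size - 1) >> 4
    //   e.g. size = 24: RoundUp(24, 16) / 16 - 1 = 2 - 1 = 1, and (24 - 1) >> 4 = 1.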
1604                                                              // Load the rosalloc run (x4)
1605    add    x4, xSELF, x3, lsl #POINTER_SIZE_SHIFT
1606    ldr    x4, [x4, #THREAD_ROSALLOC_RUNS_OFFSET]
1607                                                              // Load the free list head (x3). This
1608                                                              // will be the return val.
1609    ldr    x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
1610    cbz    x3, .Lart_quick_alloc_object_rosalloc_slow_path
1611    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
1612    ldr    x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET]               // Load the next pointer of the head
1613                                                              // and update the list head with the
1614                                                              // next pointer.
1615    str    x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
1616                                                              // Store the class pointer in the
1617                                                              // header. This also overwrites the
1618                                                              // next pointer. The offsets are
1619                                                              // asserted to match.
1620#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
1621#error "Class pointer needs to overwrite next pointer."
1622#endif
1623    POISON_HEAP_REF w2
1624    str    w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
1625                                                              // Fence. This is "ish" not "ishst" so
1626                                                              // that it also ensures ordering of
1627                                                              // the class status load with respect
1628                                                              // to later accesses to the class
1629                                                              // object. Alternatively we could use
1630                                                              // "ishst" if we use load-acquire for
1631                                                              // the class status load.
1632                                                              // Needs to be done before pushing on
1633                                                              // allocation since Heap::VisitObjects
1634                                                              // relies on seeing the class pointer.
1635                                                              // b/28790624
1636    dmb    ish
1637                                                              // Push the new object onto the thread
1638                                                              // local allocation stack and
1639                                                              // increment the thread local
1640                                                              // allocation stack top.
1641    ldr    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
1642    str    w3, [x1], #COMPRESSED_REFERENCE_SIZE               // (Increment x1 as a side effect.)
1643    str    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
1644                                                              // Decrement the size of the free list
1645    ldr    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
1646    sub    x1, x1, #1
1647                                                              // TODO: consider combining this store
1648                                                              // and the list head store above using
1649                                                              // stp.
1650    str    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
1651
1652    mov    x0, x3                                             // Set the return value and return.
1653    ret
1654.Lart_quick_alloc_object_rosalloc_slow_path:
1655    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME      // save callee saves in case of GC
1656    mov    x2, xSELF                       // pass Thread::Current
1657    bl     artAllocObjectFromCodeRosAlloc  // (uint32_t type_idx, Method* method, Thread*)
1658    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1659    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1660END art_quick_alloc_object_rosalloc
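
// A rough C-like sketch of the RosAlloc fast path above, for orientation only; the names
// are illustrative and do not correspond to the actual runtime API:
//
//   mirror::Class* klass = method->dex_cache_resolved_types_[type_idx];
//   if (klass == nullptr || klass->status_ != kInitialized || IsFinalizable(klass)) return Slow();
//   if (self->alloc_stack_top_ >= self->alloc_stack_end_) return Slow();
//   size_t size = klass->object_size_;
//   if (size >= kMaxThreadLocalBracketSize) return Slow();
//   Run* run = self->rosalloc_runs_[(size - 1) >> kBracketQuantumShift];
//   Slot* slot = run->free_list_.head_;
//   if (slot == nullptr) return Slow();
//   run->free_list_.head_ = slot->next_;
//   slot->klass_ = klass;                      // overwrites next_; followed by a dmb ish fence
//   *self->alloc_stack_top_++ = slot;          // push onto the thread-local alloc stack
//   run->free_list_.size_--;
//   return slot;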
1661
1662// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
1663//
1664// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
1665// x3-x7: free.
1666// Need to preserve x0 and x1 to the slow path.
1667.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
1668    cbz    x2, \slowPathLabel                                 // Check null class
1669                                                              // Check class status.
1670    ldr    w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
1671    cmp    x3, #MIRROR_CLASS_STATUS_INITIALIZED
1672    bne    \slowPathLabel
1673                                                              // Add a fake dependence from the
1674                                                              // following access flag and size
1675                                                              // loads to the status load.
1676                                                              // This is to prevent those loads
1677                                                              // from being reordered above the
1678                                                              // status load and reading wrong
1679                                                              // values (an alternative is to use
1680                                                              // a load-acquire for the status).
1681    eor    x3, x3, x3
1682    add    x2, x2, x3
1683                                                              // Check whether the access flags have
1684                                                              // kAccClassIsFinalizable set.
1685    ldr    w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
1686    tbnz   x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT, \slowPathLabel
1687                                                              // Load thread_local_pos (x4) and
1688                                                              // thread_local_end (x5).
1689    ldr    x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
1690    ldr    x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
1691    sub    x6, x5, x4                                         // Compute the remaining buf size.
1692    ldr    w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET]         // Load the object size (x7).
1693    cmp    x7, x6                                             // Check if it fits. OK to do this
1694                                                              // before rounding up the object size
1695                                                              // assuming the buf size alignment.
1696    bhi    \slowPathLabel
1697    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
1698                                                              // Round up the object size by the
1699                                                              // object alignment. (addr + 7) & ~7.
1700    add    x7, x7, #OBJECT_ALIGNMENT_MASK
1701    and    x7, x7, #OBJECT_ALIGNMENT_MASK_TOGGLED
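    // For example, assuming 8-byte object alignment (OBJECT_ALIGNMENT_MASK == 7):
    //   size 13 -> (13 + 7) & ~7 = 16; a size already a multiple of 8 is unchanged.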
1702                                                              // Move old thread_local_pos to x0
1703                                                              // for the return value.
1704    mov    x0, x4
1705    add    x5, x0, x7
1706    str    x5, [xSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
1707    ldr    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
1708    add    x5, x5, #1
1709    str    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
1710    POISON_HEAP_REF w2
1711    str    w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
1712                                                              // Fence. This is "ish" not "ishst" so
1713                                                              // that the code after this allocation
1714                                                              // site will see the right values in
1715                                                              // the fields of the class.
1716                                                              // Alternatively we could use "ishst"
1717                                                              // if we use load-acquire for the
1718                                                              // class status load.
1719    dmb    ish
1720    ret
1721.endm
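
// A rough C-like sketch of the TLAB fast path implemented by the macro above, for
// orientation only; the names are illustrative and not the actual runtime API:
//
//   if (klass == nullptr || klass->status_ != kInitialized || IsFinalizable(klass)) return Slow();
//   size_t size = klass->object_size_;
//   if (size > self->tlab_end_ - self->tlab_pos_) return Slow();
//   size = (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
//   mirror::Object* obj = reinterpret_cast<mirror::Object*>(self->tlab_pos_);
//   self->tlab_pos_ += size;
//   self->tlab_objects_++;
//   obj->klass_ = klass;                       // followed by a dmb ish fence
//   return obj;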
1722
1723// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
1724ENTRY art_quick_alloc_object_tlab
1725    // Fast path tlab allocation.
1726    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
1727    // x2-x7: free.
1728#if defined(USE_READ_BARRIER)
1729    mvn    x0, xzr                                            // Read barrier not supported here.
1730    ret                                                       // Return -1.
1731#endif
1732    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
1733                                                              // Load the class (x2)
1734    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
1735    ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
1736.Lart_quick_alloc_object_tlab_slow_path:
1737    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    // Save callee saves in case of GC.
1738    mov    x2, xSELF                     // Pass Thread::Current.
1739    bl     artAllocObjectFromCodeTLAB    // (uint32_t type_idx, Method* method, Thread*)
1740    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1741    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1742END art_quick_alloc_object_tlab
1743
1744// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
1745ENTRY art_quick_alloc_object_region_tlab
1746    // Fast path region tlab allocation.
1747    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
1748    // x2-x7: free.
1749#if !defined(USE_READ_BARRIER)
1750    mvn    x0, xzr                                            // Read barrier must be enabled here.
1751    ret                                                       // Return -1.
1752#endif
1753    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
1754                                                              // Load the class (x2)
1755    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
1756                                                              // Read barrier for class load.
1757    ldr    w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
1758    cbnz   x3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
1759.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
1760    ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
1761.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
1762                                                              // The read barrier slow path. Mark
1763                                                              // the class.
1764    stp    x0, x1, [sp, #-32]!                                // Save registers (x0, x1, lr).
1765    str    xLR, [sp, #16]                                     // Save lr; the 32-byte frame keeps sp 16-byte aligned.
1766    mov    x0, x2                                             // Pass the class as the first param.
1767    bl     artReadBarrierMark
1768    mov    x2, x0                                             // Get the (marked) class back.
1769    ldp    x0, x1, [sp, #0]                                   // Restore registers.
1770    ldr    xLR, [sp, #16]
1771    add    sp, sp, #32
1772    b      .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
1773.Lart_quick_alloc_object_region_tlab_slow_path:
1774    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          // Save callee saves in case of GC.
1775    mov    x2, xSELF                           // Pass Thread::Current.
1776    bl     artAllocObjectFromCodeRegionTLAB    // (uint32_t type_idx, Method* method, Thread*)
1777    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
1778    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
1779END art_quick_alloc_object_region_tlab
1780
1781    /*
1782     * Called by managed code when the thread has been asked to suspend.
1783     */
1784    .extern artTestSuspendFromCode
1785ENTRY art_quick_test_suspend
1786    ldrh   w0, [xSELF, #THREAD_FLAGS_OFFSET]  // get xSELF->state_and_flags.as_struct.flags
1787    cbnz   w0, .Lneed_suspend                 // check flags == 0
1788    ret                                       // return if flags == 0
1789.Lneed_suspend:
1790    mov    x0, xSELF
1791    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          // save callee saves for stack crawl
1792    bl     artTestSuspendFromCode             // (Thread*)
1793    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
1794END art_quick_test_suspend
1795
1796ENTRY art_quick_implicit_suspend
1797    mov    x0, xSELF
1798    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          // save callee saves for stack crawl
1799    bl     artTestSuspendFromCode             // (Thread*)
1800    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
1801END art_quick_implicit_suspend
1802
1803    /*
1804     * Called by managed code that is attempting to call a method on a proxy class. On entry
1805     * x0 holds the proxy method and x1 holds the receiver; the frame size of the invoked proxy
1806     * method agrees with a ref and args callee save frame.
1807     */
1808     .extern artQuickProxyInvokeHandler
1809ENTRY art_quick_proxy_invoke_handler
1810    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
1811    mov     x2, xSELF                   // pass Thread::Current
1812    mov     x3, sp                      // pass SP
1813    bl      artQuickProxyInvokeHandler  // (Method* proxy method, receiver, Thread*, SP)
1814    ldr     x2, [xSELF, THREAD_EXCEPTION_OFFSET]
1815    cbnz    x2, .Lexception_in_proxy    // branch if an exception is pending
1816    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
1817    fmov    d0, x0                      // Store result in d0 in case it was float or double
1818    ret                                 // return on success
1819.Lexception_in_proxy:
1820    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1821    DELIVER_PENDING_EXCEPTION
1822END art_quick_proxy_invoke_handler
1823
1824    /*
1825     * Called to resolve an imt conflict.
1826     * x0 is the conflict ArtMethod.
1827     * xIP1 is a hidden argument that holds the target interface method's dex method index.
1828     *
1829     * Note that this stub writes to xIP0, xIP1, and x0.
1830     */
1831    .extern artInvokeInterfaceTrampoline
1832ENTRY art_quick_imt_conflict_trampoline
1833    ldr xIP0, [sp, #0]  // Load referrer
1834    ldr xIP0, [xIP0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_64]   // Load dex cache methods array
1835    ldr xIP0, [xIP0, xIP1, lsl #POINTER_SIZE_SHIFT]  // Load interface method
1836    ldr xIP1, [x0, #ART_METHOD_JNI_OFFSET_64]  // Load ImtConflictTable
1837    ldr x0, [xIP1]  // Load first entry in ImtConflictTable.
1838.Limt_table_iterate:
1839    cmp x0, xIP0
1840    // Branch if found. Benchmarks have shown doing a branch here is better.
1841    beq .Limt_table_found
1842    // If the entry is null, the interface method is not in the ImtConflictTable.
1843    cbz x0, .Lconflict_trampoline
1844    // Iterate over the entries of the ImtConflictTable.
1845    ldr x0, [xIP1, #(2 * __SIZEOF_POINTER__)]!
1846    b .Limt_table_iterate
1847.Limt_table_found:
1848    // We successfully hit an entry in the table. Load the target method
1849    // and jump to it.
1850    ldr x0, [xIP1, #__SIZEOF_POINTER__]
1851    ldr xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
1852    br xIP0
1853.Lconflict_trampoline:
1854    // Call the runtime stub to populate the ImtConflictTable and jump to the
1855    // resolved method.
1856    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
1857END art_quick_imt_conflict_trampoline
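
// Conceptually, the ImtConflictTable scanned above is an array of
// (interface method, implementation method) pointer pairs, terminated by a null
// interface method. A rough C-like sketch of the lookup (illustrative only):
//
//   for (void** entry = table; ; entry += 2) {
//     if (entry[0] == interface_method) return Invoke(entry[1]);  // hit: tail-call its code
//     if (entry[0] == nullptr) return ConflictTrampoline();       // miss: let the runtime resolve
//   }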
1858
1859ENTRY art_quick_resolution_trampoline
1860    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1861    mov x2, xSELF
1862    mov x3, sp
1863    bl artQuickResolutionTrampoline  // (called, receiver, Thread*, SP)
1864    cbz x0, 1f
1865    mov xIP0, x0            // Remember returned code pointer in xIP0.
1866    ldr x0, [sp, #0]        // artQuickResolutionTrampoline puts called method in *SP.
1867    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1868    br xIP0
18691:
1870    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
1871    DELIVER_PENDING_EXCEPTION
1872END art_quick_resolution_trampoline
1873
1874/*
1875 * Generic JNI frame layout:
1876 *
1877 * #-------------------#
1878 * |                   |
1879 * | caller method...  |
1880 * #-------------------#    <--- SP on entry
1881 * | Return X30/LR     |
1882 * | X29/FP            |    callee save
1883 * | X28               |    callee save
1884 * | X27               |    callee save
1885 * | X26               |    callee save
1886 * | X25               |    callee save
1887 * | X24               |    callee save
1888 * | X23               |    callee save
1889 * | X22               |    callee save
1890 * | X21               |    callee save
1891 * | X20               |    callee save
1892 * | X19               |    callee save
1893 * | X7                |    arg7
1894 * | X6                |    arg6
1895 * | X5                |    arg5
1896 * | X4                |    arg4
1897 * | X3                |    arg3
1898 * | X2                |    arg2
1899 * | X1                |    arg1
1900 * | D7                |    float arg 8
1901 * | D6                |    float arg 7
1902 * | D5                |    float arg 6
1903 * | D4                |    float arg 5
1904 * | D3                |    float arg 4
1905 * | D2                |    float arg 3
1906 * | D1                |    float arg 2
1907 * | D0                |    float arg 1
1908 * | Method*           | <- X0
1909 * #-------------------#
1910 * | local ref cookie  | // 4B
1911 * | handle scope size | // 4B
1912 * #-------------------#
1913 * | JNI Call Stack    |
1914 * #-------------------#    <--- SP on native call
1915 * |                   |
1916 * | Stack for Regs    |    The trampoline assembly will pop these values
1917 * |                   |    into registers for native call
1918 * #-------------------#
1919 * | Native code ptr   |
1920 * #-------------------#
1921 * | Free scratch      |
1922 * #-------------------#
1923 * | Ptr to (1)        |    <--- SP
1924 * #-------------------#
1925 */
1926    /*
1927     * Called to do a generic JNI down-call
1928     */
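    /*
     * In outline: the stub reserves a large scratch area, calls
     * artQuickGenericJniTrampoline(Thread*, SP) to build the handle scope and the native call
     * stack, loads x0-x7 / d0-d7 from that stack, calls the native code, and then calls
     * artQuickGenericJniEndTrampoline(Thread*, result, result_f) before tearing the frame down
     * (or delivering a pending exception).
     */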
1929ENTRY art_quick_generic_jni_trampoline
1930    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
1931
1932    // Save SP, so we can have static CFI info.
1933    mov x28, sp
1934    .cfi_def_cfa_register x28
1935
1936    // This looks the same, but is different: this will be updated to point to the bottom
1937    // of the frame when the handle scope is inserted.
1938    mov xFP, sp
1939
1940    mov xIP0, #5120
1941    sub sp, sp, xIP0
1942
1943    // prepare for artQuickGenericJniTrampoline call
1944    // (Thread*,  SP)
1945    //    x0      x1   <= C calling convention
1946    //   xSELF    xFP  <= where they are
1947
1948    mov x0, xSELF   // Thread*
1949    mov x1, xFP
1950    bl artQuickGenericJniTrampoline  // (Thread*, sp)
1951
1952    // The C call will have registered the complete save-frame on success.
1953    // The result of the call is:
1954    // x0: pointer to native code, 0 on error.
1955    // x1: pointer to the bottom of the used area of the alloca, can restore stack till there.
1956
1957    // Check for error = 0.
1958    cbz x0, .Lexception_in_native
1959
1960    // Release part of the alloca.
1961    mov sp, x1
1962
1963    // Save the code pointer
1964    mov xIP0, x0
1965
1966    // Load parameters from frame into registers.
1967    // TODO: Check with artQuickGenericJniTrampoline.
1968    //      Also, check against AAPCS64 - the stack arguments are interleaved.
1969    ldp x0, x1, [sp]
1970    ldp x2, x3, [sp, #16]
1971    ldp x4, x5, [sp, #32]
1972    ldp x6, x7, [sp, #48]
1973
1974    ldp d0, d1, [sp, #64]
1975    ldp d2, d3, [sp, #80]
1976    ldp d4, d5, [sp, #96]
1977    ldp d6, d7, [sp, #112]
1978
1979    add sp, sp, #128
1980
1981    blr xIP0        // native call.
1982
1983    // result sign extension is handled in C code
1984    // prepare for artQuickGenericJniEndTrampoline call
1985    // (Thread*, result, result_f)
1986    //    x0       x1       x2        <= C calling convention
1987    mov x1, x0      // Result (from saved).
1988    mov x0, xSELF   // Thread register.
1989    fmov x2, d0     // d0 will contain floating point result, but needs to go into x2
1990
1991    bl artQuickGenericJniEndTrampoline
1992
1993    // Pending exceptions possible.
1994    ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
1995    cbnz x2, .Lexception_in_native
1996
1997    // Tear down the alloca.
1998    mov sp, x28
1999    .cfi_def_cfa_register sp
2000
2001    // Tear down the callee-save frame.
2002    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
2003
2004    // store into fpr, for when it's a fpr return...
2005    fmov d0, x0
2006    ret
2007
2008.Lexception_in_native:
2009    // Move to x1 then sp to please assembler.
2010    ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
2011    mov sp, x1
2012    .cfi_def_cfa_register sp
2013    // This will create a new save-all frame, required by the runtime.
2014    DELIVER_PENDING_EXCEPTION
2015END art_quick_generic_jni_trampoline
2016
2017/*
2018 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those
2019 * of a quick call:
2020 * x0 = method being called/to bridge to.
2021 * x1..x7, d0..d7 = arguments to that method.
2022 */
2023ENTRY art_quick_to_interpreter_bridge
2024    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME   // Set up frame and save arguments.
2025
2026    //  x0 will contain mirror::ArtMethod* method.
2027    mov x1, xSELF                           // Pass Thread::Current().
2028    mov x2, sp
2029
2030    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
2031    //                                      mirror::ArtMethod** sp)
2032    bl   artQuickToInterpreterBridge
2033
2034    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // TODO: no need to restore arguments in this case.
2035
2036    fmov d0, x0
2037
2038    RETURN_OR_DELIVER_PENDING_EXCEPTION
2039END art_quick_to_interpreter_bridge
2040
2041
2042//
2043// Instrumentation-related stubs
2044//
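// art_quick_instrumentation_entry asks the runtime for the code that should actually be
// invoked and then tail-calls it with lr pointing at art_quick_instrumentation_exit, so the
// exit stub runs when the instrumented method returns and can report the result to the
// runtime (or redirect the return to the deoptimization entrypoint).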
2045    .extern artInstrumentationMethodEntryFromCode
2046ENTRY art_quick_instrumentation_entry
2047    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
2048
2049    mov   x20, x0             // Preserve method reference in a callee-save.
2050
2051    mov   x2, xSELF
2052    mov   x3, xLR
2053    bl    artInstrumentationMethodEntryFromCode  // (Method*, Object*, Thread*, LR)
2054
2055    mov   xIP0, x0            // x0 = result of call.
2056    mov   x0, x20             // Reload method reference.
2057
2058    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // Note: will restore xSELF
2059    adr   xLR, art_quick_instrumentation_exit
2060    br    xIP0                // Tail-call method with lr set to art_quick_instrumentation_exit.
2061END art_quick_instrumentation_entry
2062
2063    .extern artInstrumentationMethodExitFromCode
2064ENTRY art_quick_instrumentation_exit
2065    mov   xLR, #0             // Clobber LR for later checks.
2066
2067    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
2068
2069    // We need to save x0 and d0. We could use a callee-save from SETUP_REFS_ONLY, but then
2070    // we would need to fully restore it. As there are a lot of callee-save registers, it seems
2071    // easier to have an extra small stack area.
2072
2073    str x0, [sp, #-16]!       // Save integer result.
2074    .cfi_adjust_cfa_offset 16
2075    str d0,  [sp, #8]         // Save floating-point result.
2076
2077    add   x1, sp, #16         // Pass SP.
2078    mov   x2, x0              // Pass integer result.
2079    fmov  x3, d0              // Pass floating-point result.
2080    mov   x0, xSELF           // Pass Thread.
2081    bl   artInstrumentationMethodExitFromCode    // (Thread*, SP, gpr_res, fpr_res)
2082
2083    mov   xIP0, x0            // Return address from instrumentation call.
2084    mov   xLR, x1             // x1 holds the link register to use if we're to bounce to deoptimize
2085
2086    ldr   d0, [sp, #8]        // Restore floating-point result.
2087    ldr   x0, [sp], 16        // Restore integer result, and drop stack area.
2088    .cfi_adjust_cfa_offset -16
2089
2090    POP_REFS_ONLY_CALLEE_SAVE_FRAME
2091
2092    br    xIP0                // Tail-call out.
2093END art_quick_instrumentation_exit
2094
2095    /*
2096     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
2097     * will long jump to the upcall with a special exception of -1.
2098     */
2099    .extern artDeoptimize
2100ENTRY art_quick_deoptimize
2101    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
2102    mov    x0, xSELF          // Pass thread.
2103    bl     artDeoptimize      // artDeoptimize(Thread*)
2104    brk 0
2105END art_quick_deoptimize
2106
2107    /*
2108     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
2109     * will long jump to the upcall with a special exception of -1.
2110     */
2111    .extern artDeoptimizeFromCompiledCode
2112ENTRY art_quick_deoptimize_from_compiled_code
2113    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
2114    mov    x0, xSELF                      // Pass thread.
2115    bl     artDeoptimizeFromCompiledCode  // artDeoptimizeFromCompiledCode(Thread*)
2116    brk 0
2117END art_quick_deoptimize_from_compiled_code
2118
2119
2120    /*
2121     * String's indexOf.
2122     *
2123     * TODO: Not very optimized.
2124     * On entry:
2125     *    x0:   string object (known non-null)
2126     *    w1:   char to match (known <= 0xFFFF)
2127     *    w2:   Starting offset in string data
2128     */
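    /*
     * Roughly equivalent to the following (an illustrative sketch, not the managed code):
     *   for (int i = min(max(start, 0), count); i < count; ++i) {
     *     if (value[i] == ch) return i;
     *   }
     *   return -1;
     */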
2129ENTRY art_quick_indexof
2130    ldr   w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
2131    add   x0, x0, #MIRROR_STRING_VALUE_OFFSET
2132
2133    /* Clamp start to [0..count] */
2134    cmp   w2, #0
2135    csel  w2, wzr, w2, lt
2136    cmp   w2, w3
2137    csel  w2, w3, w2, gt
2138
2139    /* Save a copy to compute result */
2140    mov   x5, x0
2141
2142    /* Build pointer to start of data to compare and pre-bias */
2143    add   x0, x0, x2, lsl #1
2144    sub   x0, x0, #2
2145
2146    /* Compute iteration count */
2147    sub   w2, w3, w2
2148
2149    /*
2150     * At this point we have:
2151     *  x0: start of the data to test
2152     *  w1: char to compare
2153     *  w2: iteration count
2154     *  x5: original start of string data
2155     */
2156
2157    subs  w2, w2, #4
2158    b.lt  .Lindexof_remainder
2159
2160.Lindexof_loop4:
2161    ldrh  w6, [x0, #2]!
2162    ldrh  w7, [x0, #2]!
2163    ldrh  wIP0, [x0, #2]!
2164    ldrh  wIP1, [x0, #2]!
2165    cmp   w6, w1
2166    b.eq  .Lmatch_0
2167    cmp   w7, w1
2168    b.eq  .Lmatch_1
2169    cmp   wIP0, w1
2170    b.eq  .Lmatch_2
2171    cmp   wIP1, w1
2172    b.eq  .Lmatch_3
2173    subs  w2, w2, #4
2174    b.ge  .Lindexof_loop4
2175
2176.Lindexof_remainder:
2177    adds  w2, w2, #4
2178    b.eq  .Lindexof_nomatch
2179
2180.Lindexof_loop1:
2181    ldrh  w6, [x0, #2]!
2182    cmp   w6, w1
2183    b.eq  .Lmatch_3
2184    subs  w2, w2, #1
2185    b.ne  .Lindexof_loop1
2186
2187.Lindexof_nomatch:
2188    mov   x0, #-1
2189    ret
2190
2191.Lmatch_0:
2192    sub   x0, x0, #6
2193    sub   x0, x0, x5
2194    asr   x0, x0, #1
2195    ret
2196.Lmatch_1:
2197    sub   x0, x0, #4
2198    sub   x0, x0, x5
2199    asr   x0, x0, #1
2200    ret
2201.Lmatch_2:
2202    sub   x0, x0, #2
2203    sub   x0, x0, x5
2204    asr   x0, x0, #1
2205    ret
2206.Lmatch_3:
2207    sub   x0, x0, x5
2208    asr   x0, x0, #1
2209    ret
2210END art_quick_indexof
2211
2212    /*
2213     * String's compareTo.
2214     *
2215     * TODO: Not very optimized.
2216     *
2217     * On entry:
2218     *    x0:   this object pointer
2219     *    x1:   comp object pointer
2220     *
2221     */
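    /*
     * Roughly equivalent to the following (an illustrative sketch, not the managed code):
     *   if (this == comp) return 0;
     *   int lengthDiff = this.count - comp.count;
     *   for (int i = 0; i < min(this.count, comp.count); ++i) {
     *     int d = this.value[i] - comp.value[i];
     *     if (d != 0) return d;
     *   }
     *   return lengthDiff;
     * For longer strings (min count > 28) the character loop is replaced by a __memcmp16 call.
     */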
2222    .extern __memcmp16
2223ENTRY art_quick_string_compareto
2224    mov    x2, x0         // x0 is return, use x2 for first input.
2225    sub    x0, x2, x1     // Same string object?
2226    cbnz   x0, 1f
2227    ret
22281:                        // Different string objects.
2229
2230    ldr    w4, [x2, #MIRROR_STRING_COUNT_OFFSET]
2231    ldr    w3, [x1, #MIRROR_STRING_COUNT_OFFSET]
2232    add    x2, x2, #MIRROR_STRING_VALUE_OFFSET
2233    add    x1, x1, #MIRROR_STRING_VALUE_OFFSET
2234
2235    /*
2236     * Now:           Data*  Count
2237     *    first arg    x2      w4
2238     *   second arg    x1      w3
2239     */
2240
2241    // x0 := str1.length (in w4) - str2.length (in w3). The 32-bit loads above zero-extended w3/w4 into x3/x4.
2242    subs x0, x4, x3
2243    // Min(count1, count2) into x3.
2244    csel x3, x3, x4, ge
2245
2246    // TODO: Tune this value.
2247    // Check for long string, do memcmp16 for them.
2248    cmp w3, #28  // Constant from arm32.
2249    bgt .Ldo_memcmp16
2250
2251    /*
2252     * Now:
2253     *   x2: *first string data
2254     *   x1: *second string data
2255     *   w3: iteration count
2256     *   x0: return value if comparison equal
2257     *   x4, x5, x6, x7: free
2258     */
2259
2260    // Do a simple unrolled loop.
2261.Lloop:
2262    // At least two more elements?
2263    subs w3, w3, #2
2264    b.lt .Lremainder_or_done
2265
2266    ldrh w4, [x2], #2
2267    ldrh w5, [x1], #2
2268
2269    ldrh w6, [x2], #2
2270    ldrh w7, [x1], #2
2271
2272    subs w4, w4, w5
2273    b.ne .Lw4_result
2274
2275    subs w6, w6, w7
2276    b.ne .Lw6_result
2277
2278    b .Lloop
2279
2280.Lremainder_or_done:
2281    adds w3, w3, #1
2282    b.eq .Lremainder
2283    ret
2284
2285.Lremainder:
2286    ldrh w4, [x2], #2
2287    ldrh w5, [x1], #2
2288    subs w4, w4, w5
2289    b.ne .Lw4_result
2290    ret
2291
2292// Result is in w4
2293.Lw4_result:
2294    sxtw x0, w4
2295    ret
2296
2297// Result is in w6
2298.Lw6_result:
2299    sxtw x0, w6
2300    ret
2301
2302.Ldo_memcmp16:
2303    mov x14, x0                  // Save x0 and LR. __memcmp16 does not use these temps.
2304    mov x15, xLR                 //                 TODO: Codify and check that?
2305
2306    mov x0, x2
2307    uxtw x2, w3
2308    bl __memcmp16
2309
2310    mov xLR, x15                 // Restore LR.
2311
2312    cmp x0, #0                   // Check the memcmp difference.
2313    csel x0, x0, x14, ne         // x0 := (x0 != 0) ? x0 : x14 (saved length diff).
2314    ret
2315END art_quick_string_compareto
2316