quick_entrypoints_arm64.S revision 05846475c8d48ce191dcd333c76d5ccc17aea9dd
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_arm64.S"

#include "arch/quick_alloc_entrypoints.S"


.macro INCREASE_FRAME frame_adjustment
    sub sp, sp, #(\frame_adjustment)
    .cfi_adjust_cfa_offset (\frame_adjustment)
.endm

.macro DECREASE_FRAME frame_adjustment
    add sp, sp, #(\frame_adjustment)
    .cfi_adjust_cfa_offset -(\frame_adjustment)
.endm

.macro SAVE_REG reg, offset
    str \reg, [sp, #(\offset)]
    .cfi_rel_offset \reg, (\offset)
.endm

.macro RESTORE_REG reg, offset
    ldr \reg, [sp, #(\offset)]
    .cfi_restore \reg
.endm

.macro SAVE_TWO_REGS reg1, reg2, offset
    stp \reg1, \reg2, [sp, #(\offset)]
    .cfi_rel_offset \reg1, (\offset)
    .cfi_rel_offset \reg2, (\offset) + 8
.endm

.macro RESTORE_TWO_REGS reg1, reg2, offset
    ldp \reg1, \reg2, [sp, #(\offset)]
    .cfi_restore \reg1
    .cfi_restore \reg2
.endm

.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
    stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
    .cfi_adjust_cfa_offset (\frame_adjustment)
    .cfi_rel_offset \reg1, 0
    .cfi_rel_offset \reg2, 8
.endm

.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
    ldp \reg1, \reg2, [sp], #(\frame_adjustment)
    .cfi_restore \reg1
    .cfi_restore \reg2
    .cfi_adjust_cfa_offset -(\frame_adjustment)
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
     */
.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]

    INCREASE_FRAME 176

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 176)
#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM64) size not as expected."
#endif

    // Stack alignment filler [sp, #8].
    // FP callee-saves.
    stp d8, d9,   [sp, #16]
    stp d10, d11, [sp, #32]
    stp d12, d13, [sp, #48]
    stp d14, d15, [sp, #64]

    // GP callee-saves
    SAVE_TWO_REGS x19, x20, 80
    SAVE_TWO_REGS x21, x22, 96
    SAVE_TWO_REGS x23, x24, 112
    SAVE_TWO_REGS x25, x26, 128
    SAVE_TWO_REGS x27, x28, 144
    SAVE_TWO_REGS x29, xLR, 160

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves].
    str xIP0, [sp]
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
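
// Illustrative C-like sketch of the lookup the macro above performs (field and
// type names follow this file's own comments; the exact runtime declarations
// are assumptions here):
//
//   art::Runtime** slot = &art::Runtime::instance_;              // adrp + :got_lo12: pair
//   art::Runtime* runtime = *slot;                               // ldr xIP0, [xIP0]
//   ArtMethod* method = runtime->callee_save_methods_[kSaveAllCalleeSaves];
//   // method is stored at [sp] and sp is then published to
//   // Thread::Current()->top_quick_frame so stack walks can see this frame.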

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
     */
.macro SETUP_SAVE_REFS_ONLY_FRAME
    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsOnly];
    ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]

    INCREASE_FRAME 96

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_ONLY != 96)
#error "FRAME_SIZE_SAVE_REFS_ONLY(ARM64) size not as expected."
#endif

    // GP callee-saves.
    // x20 paired with ArtMethod* - see below.
    SAVE_TWO_REGS x21, x22, 16
    SAVE_TWO_REGS x23, x24, 32
    SAVE_TWO_REGS x25, x26, 48
    SAVE_TWO_REGS x27, x28, 64
    SAVE_TWO_REGS x29, xLR, 80

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsOnly].
    stp xIP0, x20, [sp]
    .cfi_rel_offset x20, 8

    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_SAVE_REFS_ONLY_FRAME
    // Callee-saves.
    RESTORE_REG x20, 8
    RESTORE_TWO_REGS x21, x22, 16
    RESTORE_TWO_REGS x23, x24, 32
    RESTORE_TWO_REGS x25, x26, 48
    RESTORE_TWO_REGS x27, x28, 64
    RESTORE_TWO_REGS x29, xLR, 80

    DECREASE_FRAME 96
.endm

.macro POP_SAVE_REFS_ONLY_FRAME
    DECREASE_FRAME 96
.endm

.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
    RESTORE_SAVE_REFS_ONLY_FRAME
    ret
.endm


.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
    INCREASE_FRAME 224

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 224)
#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM64) size not as expected."
#endif

    // Stack alignment filler [sp, #8].
    // FP args.
    stp d0, d1, [sp, #16]
    stp d2, d3, [sp, #32]
    stp d4, d5, [sp, #48]
    stp d6, d7, [sp, #64]

    // Core args.
    SAVE_TWO_REGS x1, x2, 80
    SAVE_TWO_REGS x3, x4, 96
    SAVE_TWO_REGS x5, x6, 112

    // x7, Callee-saves.
    SAVE_TWO_REGS x7, x20, 128
    SAVE_TWO_REGS x21, x22, 144
    SAVE_TWO_REGS x23, x24, 160
    SAVE_TWO_REGS x25, x26, 176
    SAVE_TWO_REGS x27, x28, 192

    // x29(callee-save) and LR.
    SAVE_TWO_REGS x29, xLR, 208

.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
     *
     * TODO This is probably too conservative - saving FP & LR.
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsAndArgs];
    ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]

    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL

    str xIP0, [sp]  // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsAndArgs].
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
    str x0, [sp, #0]  // Store ArtMethod* to bottom of stack.
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
    // FP args.
    ldp d0, d1, [sp, #16]
    ldp d2, d3, [sp, #32]
    ldp d4, d5, [sp, #48]
    ldp d6, d7, [sp, #64]

    // Core args.
    RESTORE_TWO_REGS x1, x2, 80
    RESTORE_TWO_REGS x3, x4, 96
    RESTORE_TWO_REGS x5, x6, 112

    // x7, Callee-saves.
    RESTORE_TWO_REGS x7, x20, 128
    RESTORE_TWO_REGS x21, x22, 144
    RESTORE_TWO_REGS x23, x24, 160
    RESTORE_TWO_REGS x25, x26, 176
    RESTORE_TWO_REGS x27, x28, 192

    // x29(callee-save) and LR.
    RESTORE_TWO_REGS x29, xLR, 208

    DECREASE_FRAME 224
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything)
     * when the SP has already been decremented by FRAME_SIZE_SAVE_EVERYTHING
     * and saving registers x29 and LR is handled elsewhere.
     */
.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 512)
#error "FRAME_SIZE_SAVE_EVERYTHING(ARM64) size not as expected."
#endif

    // Save FP registers.
    // For better performance, store d0 and d31 separately, so that all STPs are 16-byte aligned.
    str d0,       [sp, #8]
    stp d1, d2,   [sp, #16]
    stp d3, d4,   [sp, #32]
    stp d5, d6,   [sp, #48]
    stp d7, d8,   [sp, #64]
    stp d9, d10,  [sp, #80]
    stp d11, d12, [sp, #96]
    stp d13, d14, [sp, #112]
    stp d15, d16, [sp, #128]
    stp d17, d18, [sp, #144]
    stp d19, d20, [sp, #160]
    stp d21, d22, [sp, #176]
    stp d23, d24, [sp, #192]
    stp d25, d26, [sp, #208]
    stp d27, d28, [sp, #224]
    stp d29, d30, [sp, #240]
    str d31,      [sp, #256]

    // Save core registers.
    SAVE_REG x0, 264
    SAVE_TWO_REGS x1, x2, 272
    SAVE_TWO_REGS x3, x4, 288
    SAVE_TWO_REGS x5, x6, 304
    SAVE_TWO_REGS x7, x8, 320
    SAVE_TWO_REGS x9, x10, 336
    SAVE_TWO_REGS x11, x12, 352
    SAVE_TWO_REGS x13, x14, 368
    SAVE_TWO_REGS x15, x16, 384
    SAVE_TWO_REGS x17, x18, 400
    SAVE_TWO_REGS x19, x20, 416
    SAVE_TWO_REGS x21, x22, 432
    SAVE_TWO_REGS x23, x24, 448
    SAVE_TWO_REGS x25, x26, 464
    SAVE_TWO_REGS x27, x28, 480

    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything];
    ldr xIP0, [xIP0, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET]

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveEverything].
    str xIP0, [sp]
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything)
     */
.macro SETUP_SAVE_EVERYTHING_FRAME
    INCREASE_FRAME 512
    SAVE_TWO_REGS x29, xLR, 496
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
.endm

.macro RESTORE_SAVE_EVERYTHING_FRAME
    // Restore FP registers.
    // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned.
    ldr d0,       [sp, #8]
    ldp d1, d2,   [sp, #16]
    ldp d3, d4,   [sp, #32]
    ldp d5, d6,   [sp, #48]
    ldp d7, d8,   [sp, #64]
    ldp d9, d10,  [sp, #80]
    ldp d11, d12, [sp, #96]
    ldp d13, d14, [sp, #112]
    ldp d15, d16, [sp, #128]
    ldp d17, d18, [sp, #144]
    ldp d19, d20, [sp, #160]
    ldp d21, d22, [sp, #176]
    ldp d23, d24, [sp, #192]
    ldp d25, d26, [sp, #208]
    ldp d27, d28, [sp, #224]
    ldp d29, d30, [sp, #240]
    ldr d31,      [sp, #256]

    // Restore core registers.
    RESTORE_REG x0, 264
    RESTORE_TWO_REGS x1, x2, 272
    RESTORE_TWO_REGS x3, x4, 288
    RESTORE_TWO_REGS x5, x6, 304
    RESTORE_TWO_REGS x7, x8, 320
    RESTORE_TWO_REGS x9, x10, 336
    RESTORE_TWO_REGS x11, x12, 352
    RESTORE_TWO_REGS x13, x14, 368
    RESTORE_TWO_REGS x15, x16, 384
    RESTORE_TWO_REGS x17, x18, 400
    RESTORE_TWO_REGS x19, x20, 416
    RESTORE_TWO_REGS x21, x22, 432
    RESTORE_TWO_REGS x23, x24, 448
    RESTORE_TWO_REGS x25, x26, 464
    RESTORE_TWO_REGS x27, x28, 480
    RESTORE_TWO_REGS x29, xLR, 496

    DECREASE_FRAME 512
.endm

.macro RETURN_IF_RESULT_IS_ZERO
    cbnz x0, 1f  // result non-zero branch over
    ret          // return
1:
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO
    cbz x0, 1f  // result zero branch over
    ret         // return
1:
.endm

    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_.
     */
.macro DELIVER_PENDING_EXCEPTION
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    mov x0, xSELF

    // Point of no return.
    b artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*)
    brk 0                                 // Unreached
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
    ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET]  // Get exception field.
    cbnz \reg, 1f
    ret
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm

// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
.endm

.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
    cbnz w0, 1f  // result non-zero branch over
    ret          // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context
    mov x0, xSELF                      // pass Thread::Current
    b \cxx_name                        // \cxx_name(Thread*)
END \c_name
.endm

.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context.
    mov x1, xSELF                      // pass Thread::Current.
    b \cxx_name                        // \cxx_name(arg, Thread*).
    brk 0
END \c_name
.endm

.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context
    mov x2, xSELF                      // pass Thread::Current
    b \cxx_name                        // \cxx_name(arg1, arg2, Thread*)
    brk 0
END \c_name
.endm

    /*
     * Called by managed code, saves callee saves and then calls artThrowException
     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode

    /*
     * Called by managed code to create and deliver a NullPointerException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode

    /*
     * Call installed by a signal handler to create and deliver a NullPointerException.
     */
    .extern art_quick_throw_null_pointer_exception_from_signal
ENTRY art_quick_throw_null_pointer_exception_from_signal
    // The fault handler pushes the gc map address, i.e. "return address", to stack
    // and passes the fault address in LR. So we need to set up the CFI info accordingly.
    .cfi_def_cfa_offset __SIZEOF_POINTER__
    .cfi_rel_offset lr, 0
    // Save all registers as basis for long jump context.
    INCREASE_FRAME (FRAME_SIZE_SAVE_EVERYTHING - __SIZEOF_POINTER__)
    SAVE_REG x29, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)  // LR already saved.
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
    mov x0, lr     // pass the fault address stored in LR by the fault handler.
    mov x1, xSELF  // pass Thread::Current.
    // TODO: Change other throwing entrypoints to use BL instead of B. http://b/31468464
    bl artThrowNullPointerExceptionFromSignal  // (arg, Thread*).
    brk 0
END art_quick_throw_null_pointer_exception_from_signal

    /*
     * Called by managed code to create and deliver an ArithmeticException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
     * index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode

    /*
     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
     * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
     * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/x1.
     *
     * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
     * of the target Method* in x0 and method->code_ in x1.
     *
     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     *
     * Adapted from ARM32 code.
     *
     * Clobbers xIP0.
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_SAVE_REFS_AND_ARGS_FRAME  // save callee saves in case allocation triggers GC
    // Helper signature is always
    // (method_idx, *this_object, *caller_method, *self, sp)

    mov x2, xSELF  // pass Thread::Current
    mov x3, sp
    bl \cxx_name   // (method_idx, this, Thread*, SP)
    mov xIP0, x1   // save Method*->code_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    cbz x0, 1f     // did we find the target? if not go to exception delivery
    br xIP0        // tail call to target
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
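
// Illustrative C-like sketch of the trampoline contract above (the two-register
// return is modeled as a struct; the types are simplified assumptions):
//
//   struct Pair { ArtMethod* method; void* code; };   // returned in x0/x1
//   Pair r = artInvokeXxxTrampolineWithAccessCheck(method_idx, this, self, sp);
//   if (r.method == nullptr) {
//     DELIVER_PENDING_EXCEPTION();   // the helper left an exception in the thread
//   } else {
//     goto *r.code;                  // tail call; LR still points at the caller
//   }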

.macro INVOKE_STUB_CREATE_FRAME

SAVE_SIZE=15*8  // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+8


    mov x9, sp  // Save stack pointer.
    .cfi_register sp,x9

    add x10, x2, # SAVE_SIZE_AND_METHOD  // calculate size of frame.
    sub x10, sp, x10                     // Calculate SP position - saves + ArtMethod* + args
    and x10, x10, # ~0xf                 // Enforce 16 byte stack alignment.
    mov sp, x10                          // Set new SP.

    sub x10, x9, #SAVE_SIZE    // Calculate new FP (later). Done here as we must move SP
    .cfi_def_cfa_register x10  // before this.
    .cfi_adjust_cfa_offset SAVE_SIZE

    str x28, [x10, #112]
    .cfi_rel_offset x28, 112

    stp x26, x27, [x10, #96]
    .cfi_rel_offset x26, 96
    .cfi_rel_offset x27, 104

    stp x24, x25, [x10, #80]
    .cfi_rel_offset x24, 80
    .cfi_rel_offset x25, 88

    stp x22, x23, [x10, #64]
    .cfi_rel_offset x22, 64
    .cfi_rel_offset x23, 72

    stp x20, x21, [x10, #48]
    .cfi_rel_offset x20, 48
    .cfi_rel_offset x21, 56

    stp x9, x19, [x10, #32]  // Save old stack pointer and x19.
    .cfi_rel_offset sp, 32
    .cfi_rel_offset x19, 40

    stp x4, x5, [x10, #16]  // Save result and shorty addresses.
    .cfi_rel_offset x4, 16
    .cfi_rel_offset x5, 24

    stp xFP, xLR, [x10]  // Store LR & FP.
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8

    mov xFP, x10  // Use xFP now, as it's callee-saved.
    .cfi_def_cfa_register x29
    mov xSELF, x3  // Move thread pointer into SELF register.

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X1 - source address
    // W2 - args length
    // X9 - destination address.
    // W10 - temporary
    add x9, sp, #8  // Destination address is bottom of stack + null.

    // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler
    // does not have unique-id variables.
1:
    cmp w2, #0
    beq 2f
    sub w2, w2, #4  // Need 65536 bytes of range.
    ldr w10, [x1, x2]
    str w10, [x9, x2]

    b 1b

2:
    // Store null into ArtMethod* at bottom of frame.
    str xzr, [sp]
.endm

.macro INVOKE_STUB_CALL_AND_RETURN

    // load method-> METHOD_QUICK_CODE_OFFSET
    ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    // Branch to method.
    blr x9

    // Restore return value address and shorty address.
    ldp x4, x5, [xFP, #16]
    .cfi_restore x4
    .cfi_restore x5

    ldr x28, [xFP, #112]
    .cfi_restore x28

    ldp x26, x27, [xFP, #96]
    .cfi_restore x26
    .cfi_restore x27

    ldp x24, x25, [xFP, #80]
    .cfi_restore x24
    .cfi_restore x25

    ldp x22, x23, [xFP, #64]
    .cfi_restore x22
    .cfi_restore x23

    ldp x20, x21, [xFP, #48]
    .cfi_restore x20
    .cfi_restore x21

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x5]

    // Check the return type and store the correct register into the jvalue in memory.
    // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables.

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq 3f

    // Is it a double?
    cmp w10, #'D'
    bne 1f
    str d0, [x4]
    b 3f

1:  // Is it a float?
    cmp w10, #'F'
    bne 2f
    str s0, [x4]
    b 3f

2:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x4]

3:  // Finish up.
    ldp x2, x19, [xFP, #32]  // Restore stack pointer and x19.
    .cfi_restore x19
    mov sp, x2
    .cfi_restore sp

    ldp xFP, xLR, [xFP]  // Restore old frame pointer and link register.
    .cfi_restore x29
    .cfi_restore x30

    ret

.endm


/*
 *  extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 *  +----------------------+
 *  |                      |
 *  |  C/C++ frame         |
 *  |       LR''           |
 *  |       FP''           | <- SP'
 *  +----------------------+
 *  +----------------------+
 *  |        x28           | <- TODO: Remove callee-saves.
 *  |         :            |
 *  |        x19           |
 *  |        SP'           |
 *  |        X5            |
 *  |        X4            |        Saved registers
 *  |        LR'           |
 *  |        FP'           | <- FP
 *  +----------------------+
 *  | uint32_t out[n-1]    |
 *  |    :      :          |        Outs
 *  | uint32_t out[0]      |
 *  | ArtMethod*           | <- SP  value=null
 *  +----------------------+
 *
 * Outgoing registers:
 *  x0    - Method*
 *  x1-x7 - integer parameters.
 *  d0-d7 - Floating point parameters.
 *  xSELF = self
 *  SP = & of ArtMethod*
 *  x1 = "this" pointer.
 *
 */
ENTRY art_quick_invoke_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW2
    adr x12, .LstoreX2
    adr x13, .LstoreS0
    adr x14, .LstoreD0

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1    // Load shorty address, plus one to skip return value.
    ldr w1, [x9], #4   // Load "this" parameter, and increment arg pointer.

    // Loop to fill registers.
.LfillRegisters:
    ldrb w17, [x10], #1  // Load next character in signature, and increment.
    cbz w17, .LcallFunction  // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'  // is this a float?
    bne .LisDouble

    cmp x15, # 8*12  // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x13, x15  // Calculate subroutine to jump to.
    br x17

.LisDouble:
    cmp w17, #'D'  // is this a double?
    bne .LisLong

    cmp x15, # 8*12  // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x14, x15  // Calculate subroutine to jump to.
    br x17

.LisLong:
    cmp w17, #'J'  // is this a long?
    bne .LisOther

    cmp x8, # 6*12  // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x12, x8  // Calculate subroutine to jump to.
    br x17

.LisOther:  // Everything else takes one vReg.
    cmp x8, # 6*12  // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x11, x8  // Calculate subroutine to jump to.
    br x17

.Ladvance4:
    add x9, x9, #4
    b .LfillRegisters

.Ladvance8:
    add x9, x9, #8
    b .LfillRegisters

// Macro for loading a parameter into a register.
//  counter - the register with offset into these tables
//  size - the size of the register - 4 or 8 bytes.
//  register - the name of the register to be loaded.
.macro LOADREG counter size register return
    ldr \register , [x9], #\size
    add \counter, \counter, 12
    b \return
.endm

// Store ints.
.LstoreW2:
    LOADREG x8 4 w2 .LfillRegisters
    LOADREG x8 4 w3 .LfillRegisters
    LOADREG x8 4 w4 .LfillRegisters
    LOADREG x8 4 w5 .LfillRegisters
    LOADREG x8 4 w6 .LfillRegisters
    LOADREG x8 4 w7 .LfillRegisters

// Store longs.
.LstoreX2:
    LOADREG x8 8 x2 .LfillRegisters
    LOADREG x8 8 x3 .LfillRegisters
    LOADREG x8 8 x4 .LfillRegisters
    LOADREG x8 8 x5 .LfillRegisters
    LOADREG x8 8 x6 .LfillRegisters
    LOADREG x8 8 x7 .LfillRegisters

// Store singles.
.LstoreS0:
    LOADREG x15 4 s0 .LfillRegisters
    LOADREG x15 4 s1 .LfillRegisters
    LOADREG x15 4 s2 .LfillRegisters
    LOADREG x15 4 s3 .LfillRegisters
    LOADREG x15 4 s4 .LfillRegisters
    LOADREG x15 4 s5 .LfillRegisters
    LOADREG x15 4 s6 .LfillRegisters
    LOADREG x15 4 s7 .LfillRegisters

// Store doubles.
.LstoreD0:
    LOADREG x15 8 d0 .LfillRegisters
    LOADREG x15 8 d1 .LfillRegisters
    LOADREG x15 8 d2 .LfillRegisters
    LOADREG x15 8 d3 .LfillRegisters
    LOADREG x15 8 d4 .LfillRegisters
    LOADREG x15 8 d5 .LfillRegisters
    LOADREG x15 8 d6 .LfillRegisters
    LOADREG x15 8 d7 .LfillRegisters


.LcallFunction:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_stub
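
// Illustrative C sketch of the shorty-driven dispatch above (an explanatory
// model, not runtime code; next4()/next8() stand for consuming the next 4/8
// bytes of args, and each LOADREG table entry is 12 bytes of code indexed by
// x8/x15):
//
//   size_t gpr_off = 0, fpr_off = 0;              // x8, x15
//   for (const char* s = shorty + 1; *s; ++s) {   // +1 skips the return type.
//     switch (*s) {
//       case 'F': if (fpr_off == 8*12) skip4(); else { s_reg[fpr_off/12] = next4(); fpr_off += 12; } break;
//       case 'D': if (fpr_off == 8*12) skip8(); else { d_reg[fpr_off/12] = next8(); fpr_off += 12; } break;
//       case 'J': if (gpr_off == 6*12) skip8(); else { x_reg[gpr_off/12] = next8(); gpr_off += 12; } break;
//       default:  if (gpr_off == 6*12) skip4(); else { w_reg[gpr_off/12] = next4(); gpr_off += 12; } break;
//     }
//   }
//   // Arguments that do not fit in registers were already copied into the outs
//   // area by INVOKE_STUB_CREATE_FRAME, so skipping here is safe.
//   // art_quick_invoke_static_stub below is identical except its GPR tables
//   // start at w1/x1, so the full checks use 7*12.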

/*  extern"C"
 *     void art_quick_invoke_static_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 */
ENTRY art_quick_invoke_static_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW1_2
    adr x12, .LstoreX1_2
    adr x13, .LstoreS0_2
    adr x14, .LstoreD0_2

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1  // Load shorty address, plus one to skip return value.

    // Loop to fill registers.
.LfillRegisters2:
    ldrb w17, [x10], #1       // Load next character in signature, and increment.
    cbz w17, .LcallFunction2  // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'  // is this a float?
    bne .LisDouble2

    cmp x15, # 8*12  // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x13, x15  // Calculate subroutine to jump to.
    br x17

.LisDouble2:
    cmp w17, #'D'  // is this a double?
    bne .LisLong2

    cmp x15, # 8*12  // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x14, x15  // Calculate subroutine to jump to.
    br x17

.LisLong2:
    cmp w17, #'J'  // is this a long?
    bne .LisOther2

    cmp x8, # 7*12  // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x12, x8  // Calculate subroutine to jump to.
    br x17

.LisOther2:  // Everything else takes one vReg.
    cmp x8, # 7*12  // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x11, x8  // Calculate subroutine to jump to.
    br x17

.Ladvance4_2:
    add x9, x9, #4
    b .LfillRegisters2

.Ladvance8_2:
    add x9, x9, #8
    b .LfillRegisters2

// Store ints.
.LstoreW1_2:
    LOADREG x8 4 w1 .LfillRegisters2
    LOADREG x8 4 w2 .LfillRegisters2
    LOADREG x8 4 w3 .LfillRegisters2
    LOADREG x8 4 w4 .LfillRegisters2
    LOADREG x8 4 w5 .LfillRegisters2
    LOADREG x8 4 w6 .LfillRegisters2
    LOADREG x8 4 w7 .LfillRegisters2

// Store longs.
.LstoreX1_2:
    LOADREG x8 8 x1 .LfillRegisters2
    LOADREG x8 8 x2 .LfillRegisters2
    LOADREG x8 8 x3 .LfillRegisters2
    LOADREG x8 8 x4 .LfillRegisters2
    LOADREG x8 8 x5 .LfillRegisters2
    LOADREG x8 8 x6 .LfillRegisters2
    LOADREG x8 8 x7 .LfillRegisters2

// Store singles.
.LstoreS0_2:
    LOADREG x15 4 s0 .LfillRegisters2
    LOADREG x15 4 s1 .LfillRegisters2
    LOADREG x15 4 s2 .LfillRegisters2
    LOADREG x15 4 s3 .LfillRegisters2
    LOADREG x15 4 s4 .LfillRegisters2
    LOADREG x15 4 s5 .LfillRegisters2
    LOADREG x15 4 s6 .LfillRegisters2
    LOADREG x15 4 s7 .LfillRegisters2

// Store doubles.
.LstoreD0_2:
    LOADREG x15 8 d0 .LfillRegisters2
    LOADREG x15 8 d1 .LfillRegisters2
    LOADREG x15 8 d2 .LfillRegisters2
    LOADREG x15 8 d3 .LfillRegisters2
    LOADREG x15 8 d4 .LfillRegisters2
    LOADREG x15 8 d5 .LfillRegisters2
    LOADREG x15 8 d6 .LfillRegisters2
    LOADREG x15 8 d7 .LfillRegisters2


.LcallFunction2:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_static_stub



/*  extern"C" void art_quick_osr_stub(void** stack,                x0
 *                                    size_t stack_size_in_bytes,  x1
 *                                    const uint8_t* native_pc,    x2
 *                                    JValue *result,              x3
 *                                    char   *shorty,              x4
 *                                    Thread *self)                x5
 */
ENTRY art_quick_osr_stub
SAVE_SIZE=15*8  // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
    mov x9, sp  // Save stack pointer.
    .cfi_register sp,x9

    sub x10, sp, # SAVE_SIZE
    and x10, x10, # ~0xf  // Enforce 16 byte stack alignment.
    mov sp, x10           // Set new SP.

    str x28, [sp, #112]
    stp x26, x27, [sp, #96]
    stp x24, x25, [sp, #80]
    stp x22, x23, [sp, #64]
    stp x20, x21, [sp, #48]
    stp x9, x19, [sp, #32]  // Save old stack pointer and x19.
    stp x3, x4, [sp, #16]   // Save result and shorty addresses.
    stp xFP, xLR, [sp]  // Store LR & FP.
    mov xSELF, x5       // Move thread pointer into SELF register.

    sub sp, sp, #16
    str xzr, [sp]  // Store null for ArtMethod* slot
    // Branch to stub.
    bl .Losr_entry
    add sp, sp, #16

    // Restore return value address and shorty address.
    ldp x3, x4, [sp, #16]
    ldr x28, [sp, #112]
    ldp x26, x27, [sp, #96]
    ldp x24, x25, [sp, #80]
    ldp x22, x23, [sp, #64]
    ldp x20, x21, [sp, #48]

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x4]

    // Check the return type and store the correct register into the jvalue in memory.

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq .Losr_exit

    // Is it a double?
    cmp w10, #'D'
    bne .Lno_double
    str d0, [x3]
    b .Losr_exit

.Lno_double:  // Is it a float?
    cmp w10, #'F'
    bne .Lno_float
    str s0, [x3]
    b .Losr_exit

.Lno_float:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x3]

.Losr_exit:  // Finish up.
    ldp x2, x19, [sp, #32]  // Restore stack pointer and x19.
    ldp xFP, xLR, [sp]      // Restore old frame pointer and link register.
    mov sp, x2
    ret

.Losr_entry:
    // Update stack pointer for the callee
    sub sp, sp, x1

    // Update link register slot expected by the callee.
    sub w1, w1, #8
    str lr, [sp, x1]

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X0 - source address
    // W1 - args length
    // SP - destination address.
    // W10 - temporary
.Losr_loop_entry:
    cmp w1, #0
    beq .Losr_loop_exit
    sub w1, w1, #4
    ldr w10, [x0, x1]
    str w10, [sp, x1]
    b .Losr_loop_entry

.Losr_loop_exit:
    // Branch to the OSR entry point.
    br x2

END art_quick_osr_stub

    /*
     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_
     */

ENTRY art_quick_do_long_jump
    // Load FPRs
    ldp d0, d1, [x1], #16
    ldp d2, d3, [x1], #16
    ldp d4, d5, [x1], #16
    ldp d6, d7, [x1], #16
    ldp d8, d9, [x1], #16
    ldp d10, d11, [x1], #16
    ldp d12, d13, [x1], #16
    ldp d14, d15, [x1], #16
    ldp d16, d17, [x1], #16
    ldp d18, d19, [x1], #16
    ldp d20, d21, [x1], #16
    ldp d22, d23, [x1], #16
    ldp d24, d25, [x1], #16
    ldp d26, d27, [x1], #16
    ldp d28, d29, [x1], #16
    ldp d30, d31, [x1]

    // Load GPRs
    // TODO: lots of those are smashed, could optimize.
    add x0, x0, #30*8
    ldp x30, x1, [x0], #-16  // LR & SP
    ldp x28, x29, [x0], #-16
    ldp x26, x27, [x0], #-16
    ldp x24, x25, [x0], #-16
    ldp x22, x23, [x0], #-16
    ldp x20, x21, [x0], #-16
    ldp x18, x19, [x0], #-16
    ldp x16, x17, [x0], #-16
    ldp x14, x15, [x0], #-16
    ldp x12, x13, [x0], #-16
    ldp x10, x11, [x0], #-16
    ldp x8, x9, [x0], #-16
    ldp x6, x7, [x0], #-16
    ldp x4, x5, [x0], #-16
    ldp x2, x3, [x0], #-16
    mov sp, x1

    // Need to load PC, it's at the end (after the space for the unused XZR). Use x1.
    ldr x1, [x0, #33*8]
    // And the value of x0.
    ldr x0, [x0]

    br x1
END art_quick_do_long_jump
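
// Illustrative map of the context consumed above (inferred from the offsets
// used here, not from the runtime's declarations): gprs_[0..30] hold x0..x30,
// gprs_[31] holds SP, gprs_[32] is the slot for the unused XZR, and gprs_[33]
// holds the target PC; fprs_[0..31] hold d0..d31 in order. Note that x1 is
// used as scratch for SP and PC, so the context's x1 value is not restored.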

    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
     * possibly null object to lock.
     *
     * Derived from arm32 code.
     */
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    cbz w0, .Lslow_lock
    add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
.Lretry_lock:
    ldr w2, [xSELF, #THREAD_ID_OFFSET]  // TODO: Can the thread ID really change during the loop?
    ldxr w1, [x4]
    mov x3, x1
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
    cbnz w3, .Lnot_unlocked  // already thin locked
    // unlocked case - x1: original lock word that's zero except for the read barrier bits.
    orr x2, x1, x2  // x2 holds thread id with count of 0 with preserved read barrier bits
    stxr w3, w2, [x4]
    cbnz w3, .Llock_stxr_fail  // store failed, retry
    dmb ishld  // full (LoadLoad|LoadStore) memory barrier
    ret
.Lnot_unlocked:  // x1: original lock word
    lsr w3, w1, LOCK_WORD_STATE_SHIFT
    cbnz w3, .Lslow_lock  // if either of the top two bits are set, go slow path
    eor w2, w1, w2        // lock_word.ThreadId() ^ self->ThreadId()
    uxth w2, w2           // zero top 16 bits
    cbnz w2, .Lslow_lock  // lock word and self thread id's match -> recursive lock
                          // else contention, go to slow path
    mov x3, x1  // copy the lock word to check count overflow.
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits.
    add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count in lock word placing in w2 to check overflow
    lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT  // if the first gc state bit is set, we overflowed.
    cbnz w3, .Lslow_lock  // if we overflow the count go slow path
    add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count for real
    stxr w3, w2, [x4]
    cbnz w3, .Llock_stxr_fail  // store failed, retry
    ret
.Llock_stxr_fail:
    b .Lretry_lock  // retry
.Lslow_lock:
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case we block
    mov x1, xSELF               // pass Thread::Current
    bl artLockObjectFromCode    // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object

ENTRY art_quick_lock_object_no_inline
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case we block
    mov x1, xSELF               // pass Thread::Current
    bl artLockObjectFromCode    // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline
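
// Illustrative C-like sketch of the thin-lock fast path above (lock word
// fields follow the LOCK_WORD_* constants; the exact layout lives in
// lock_word.h and is an assumption here; store_exclusive() models stxr and
// returns whether the store succeeded):
//
//   retry:
//     uint32_t lw = load_exclusive(&obj->monitor_);
//     if ((lw & ~kGCStateMask) == 0) {                    // Unlocked.
//       uint32_t nlw = lw | self->thin_lock_thread_id_;   // Owner id, count 0.
//       if (!store_exclusive(&obj->monitor_, nlw)) goto retry;
//       acquire_fence();                                  // dmb ishld
//       return;
//     }
//     if (state_bits(lw) != 0) goto slow_path;            // Fat lock or hash code.
//     if (owner_id(lw) != self->thin_lock_thread_id_) goto slow_path;  // Contended.
//     if (count_would_overflow(lw)) goto slow_path;
//     if (!store_exclusive(&obj->monitor_, lw + kThinLockCountOne)) goto retry;
//     return;                                             // Recursive lock taken.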

    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
     * x0 holds the possibly null object to unlock.
     *
     * Derived from arm32 code.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    cbz x0, .Lslow_unlock
    add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
.Lretry_unlock:
#ifndef USE_READ_BARRIER
    ldr w1, [x4]
#else
    ldxr w1, [x4]  // Need to use atomic instructions for read barrier
#endif
    lsr w2, w1, LOCK_WORD_STATE_SHIFT
    cbnz w2, .Lslow_unlock  // if either of the top two bits are set, go slow path
    ldr w2, [xSELF, #THREAD_ID_OFFSET]
    mov x3, x1  // copy lock word to check thread id equality
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
    eor w3, w3, w2  // lock_word.ThreadId() ^ self->ThreadId()
    uxth w3, w3     // zero top 16 bits
    cbnz w3, .Lslow_unlock  // do lock word and self thread id's match?
    mov x3, x1  // copy lock word to detect transition to unlocked
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
    cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
    bpl .Lrecursive_thin_unlock
    // transition to unlocked
    mov x3, x1
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED  // w3: zero except for the preserved read barrier bits
    dmb ish  // full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
    str w3, [x4]
#else
    stxr w2, w3, [x4]  // Need to use atomic instructions for read barrier
    cbnz w2, .Lunlock_stxr_fail  // store failed, retry
#endif
    ret
.Lrecursive_thin_unlock:  // w1: original lock word
    sub w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // decrement count
#ifndef USE_READ_BARRIER
    str w1, [x4]
#else
    stxr w2, w1, [x4]  // Need to use atomic instructions for read barrier
    cbnz w2, .Lunlock_stxr_fail  // store failed, retry
#endif
    ret
.Lunlock_stxr_fail:
    b .Lretry_unlock  // retry
.Lslow_unlock:
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case exception allocation triggers GC
    mov x1, xSELF               // pass Thread::Current
    bl artUnlockObjectFromCode  // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object

ENTRY art_quick_unlock_object_no_inline
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case exception allocation triggers GC
    mov x1, xSELF               // pass Thread::Current
    bl artUnlockObjectFromCode  // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline

    /*
     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
     * artThrowClassCastException.
     */
    .extern artThrowClassCastException
ENTRY art_quick_check_cast
    // Store arguments and link register
    // Stack needs to be 16B aligned on calls.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
    SAVE_REG xLR, 24

    // Call runtime code
    bl artIsAssignableFromCode

    // Check for exception
    cbz x0, .Lthrow_class_cast_exception

    // Restore and return
    .cfi_remember_state
    RESTORE_REG xLR, 24
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
    ret
    .cfi_restore_state  // Reset unwind info so following code unwinds.

.Lthrow_class_cast_exception:
    // Restore
    RESTORE_REG xLR, 24
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context
    mov x2, xSELF  // pass Thread::Current
    b artThrowClassCastException  // (Class*, Class*, Thread*)
    brk 0  // We should not return here...
END art_quick_check_cast

// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
.macro POP_REG_NE xReg, offset, xExclude
    .ifnc \xReg, \xExclude
        ldr \xReg, [sp, #\offset]  // restore xReg
        .cfi_restore \xReg
    .endif
.endm

// Restore xReg1's value from [sp, #offset] if xReg1 is not the same as xExclude.
// Restore xReg2's value from [sp, #(offset + 8)] if xReg2 is not the same as xExclude.
.macro POP_REGS_NE xReg1, xReg2, offset, xExclude
    .ifc \xReg1, \xExclude
        ldr \xReg2, [sp, #(\offset + 8)]  // restore xReg2
    .else
        .ifc \xReg2, \xExclude
            ldr \xReg1, [sp, #\offset]  // restore xReg1
        .else
            ldp \xReg1, \xReg2, [sp, #\offset]  // restore xReg1 and xReg2
        .endif
    .endif
    .cfi_restore \xReg1
    .cfi_restore \xReg2
.endm

    /*
     * Macro to insert read barrier, only used in art_quick_aput_obj.
     * xDest, wDest and xObj are registers, offset is a defined literal such as
     * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
     * name mismatch between instructions. This macro uses the lower 32b of register when possible.
     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
     */
.macro READ_BARRIER xDest, wDest, xObj, xTemp, wTemp, offset, number
#ifdef USE_READ_BARRIER
#ifdef USE_BAKER_READ_BARRIER
    ldr \wTemp, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbnz \wTemp, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, .Lrb_slowpath\number
    // False dependency to avoid needing load/load fence.
    add \xObj, \xObj, \xTemp, lsr #32
    ldr \wDest, [\xObj, #\offset]  // Heap reference = 32b. This also zero-extends to \xDest.
    UNPOISON_HEAP_REF \wDest
    b .Lrb_exit\number
#endif
.Lrb_slowpath\number:
    // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 48
    SAVE_TWO_REGS x2, x3, 16
    SAVE_TWO_REGS x4, xLR, 32

    // mov x0, \xRef  // pass ref in x0 (no-op for now since parameter ref is unused)
    .ifnc \xObj, x1
        mov x1, \xObj  // pass xObj
    .endif
    mov w2, #\offset  // pass offset
    bl artReadBarrierSlow  // artReadBarrierSlow(ref, xObj, offset)
    // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning.
    .ifnc \wDest, w0
        mov \wDest, w0  // save return value in wDest
    .endif

    // Conditionally restore saved registers
    POP_REG_NE x0, 0, \xDest
    POP_REG_NE x1, 8, \xDest
    POP_REG_NE x2, 16, \xDest
    POP_REG_NE x3, 24, \xDest
    POP_REG_NE x4, 32, \xDest
    RESTORE_REG xLR, 40
    DECREASE_FRAME 48
.Lrb_exit\number:
#else
    ldr \wDest, [\xObj, #\offset]  // Heap reference = 32b. This also zero-extends to \xDest.
    UNPOISON_HEAP_REF \wDest
#endif  // USE_READ_BARRIER
.endm
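
// Illustrative C-like sketch of the Baker read barrier fast path above (a
// simplified model; the real state bits are defined in lock_word.h):
//
//   uint32_t lw = obj->monitor_;                    // Lock word holds the rb state.
//   if (rb_state_bit_set(lw)) {
//     ref = artReadBarrierSlow(ref, obj, offset);   // Gray object: runtime call.
//   } else {
//     // "add xObj, xObj, xTemp, lsr #32" adds zero but makes the load below
//     // address-depend on the lock word load, ordering the two loads without
//     // an explicit load/load fence.
//     ref = *(uint32_t*)((char*)obj + offset);      // 32-bit heap reference.
//   }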

    /*
     * Entry from managed code for array put operations of objects where the value being stored
     * needs to be checked for compatibility.
     * x0 = array, x1 = index, x2 = value
     *
     * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We
     * assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by
     * using index-zero-extension in load/stores.
     *
     * Temporaries: x3, x4
     * TODO: x4 OK? ip seems wrong here.
     */
ENTRY art_quick_aput_obj_with_null_and_bound_check
    tst x0, x0
    bne art_quick_aput_obj_with_bound_check
    b art_quick_throw_null_pointer_exception
END art_quick_aput_obj_with_null_and_bound_check

ENTRY art_quick_aput_obj_with_bound_check
    ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]
    cmp w3, w1
    bhi art_quick_aput_obj
    mov x0, x1
    mov x1, x3
    b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check

#ifdef USE_READ_BARRIER
    .extern artReadBarrierSlow
#endif
ENTRY art_quick_aput_obj
    cbz x2, .Ldo_aput_null
    READ_BARRIER x3, w3, x0, x3, w3, MIRROR_OBJECT_CLASS_OFFSET, 0          // Heap reference = 32b
                                                                            // This also zero-extends to x3
    READ_BARRIER x3, w3, x3, x4, w4, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 1  // Heap reference = 32b
                                                                            // This also zero-extends to x3
    READ_BARRIER x4, w4, x2, x4, w4, MIRROR_OBJECT_CLASS_OFFSET, 2          // Heap reference = 32b
                                                                            // This also zero-extends to x4
    cmp w3, w4  // value's type == array's component type - trivial assignability
    bne .Lcheck_assignability
.Ldo_aput:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]  // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
.Ldo_aput_null:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing
    str w2, [x3, x1, lsl #2]  // Heap reference = 32b
    ret
.Lcheck_assignability:
    // Store arguments and link register
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
    SAVE_TWO_REGS x2, xLR, 16

    // Call runtime code
    mov x0, x3  // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    mov x1, x4  // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    bl artIsAssignableFromCode

    // Check for exception
    cbz x0, .Lthrow_array_store_exception

    // Restore
    .cfi_remember_state
    RESTORE_TWO_REGS x2, xLR, 16
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]  // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
    .cfi_restore_state  // Reset unwind info so following code unwinds.
.Lthrow_array_store_exception:
    RESTORE_TWO_REGS x2, xLR, 16
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    mov x1, x2     // Pass value.
    mov x2, xSELF  // Pass Thread::Current.
    b artThrowArrayStoreException  // (Object*, Object*, Thread*).
    brk 0  // Unreached.
END art_quick_aput_obj
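
// Illustrative C-like sketch of art_quick_aput_obj above (a simplified model;
// the >>7 card shift and the byte written follow this code, with the dirty
// value being the low byte of the card table base as loaded here):
//
//   if (value == nullptr) { array->data[index] = 0; return; }   // No card mark needed.
//   Class* ct = array->klass->component_type_;                  // Via READ_BARRIER.
//   if (value->klass == ct || artIsAssignableFromCode(ct, value->klass)) {
//     array->data[index] = poison(value);                       // 32-bit reference store.
//     card_table[(uintptr_t)array >> 7] = (uint8_t)(uintptr_t)card_table;
//   } else {
//     artThrowArrayStoreException(array, value, self);          // Does not return.
//   }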

// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    mov x1, xSELF               // pass Thread::Current
    bl \entrypoint              // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    mov x2, xSELF               // pass Thread::Current
    bl \entrypoint              // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    mov x3, xSELF               // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    mov x4, xSELF               // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
    DELIVER_PENDING_EXCEPTION
END \name
.endm

// Macros taking advantage of code similarities for downcalls with referrer.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
    mov x2, xSELF   // pass Thread::Current
    bl \entrypoint  // (uint32_t type_idx, Method* method, Thread*, SP)
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    ldr x2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
    mov x3, xSELF  // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    ldr x3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
    mov x4, xSELF  // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    cbz w0, 1f  // result zero branch over
    ret         // return
1:
    DELIVER_PENDING_EXCEPTION
.endm
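
// For reference, a hypothetical ONE_ARG_DOWNCALL user such as
//   ONE_ARG_DOWNCALL art_quick_foo, artFooFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// would expand to an entrypoint that sets up the kSaveRefsOnly frame, calls
// artFooFromCode(arg, Thread::Current()), tears the frame down, and then either
// returns the non-zero result in x0 or delivers the pending exception.
// (art_quick_foo/artFooFromCode are illustrative names, not real entrypoints.)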

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

    /*
     * Entry from managed code when static storage is uninitialized; this stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

// This is separated out as the argument order is different.
    .extern artSet64StaticFromCode
ENTRY art_quick_set64_static
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
    // x2 contains the parameter
    mov x3, xSELF  // pass Thread::Current
    bl artSet64StaticFromCode
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static

    /*
     * Entry from managed code to resolve a string, this stub will
     * check the dex cache for a matching string (the fast path), and if not found,
     * it will allocate a String and deliver an exception on error.
     * On success the String is returned. W0 holds the string index.
     */

ENTRY art_quick_resolve_string
    ldr x1, [sp]  // load referrer
    ldr w2, [x1, #ART_METHOD_DECLARING_CLASS_OFFSET]  // load declaring class
    ldr x1, [x2, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET]  // load string dex cache
    ubfx x2, x0, #0, #STRING_DEX_CACHE_HASH_BITS  // get masked string index into x2
    ldr x2, [x1, x2, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT]  // load dex cache pair into x2
    cmp x0, x2, lsr #32  // compare against upper 32 bits
    bne .Lart_quick_resolve_string_slow_path
    ubfx x0, x2, #0, #32  // extract lower 32 bits into x0
#ifdef USE_READ_BARRIER
    // Most common case: GC is not marking.
    ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
    cbnz x3, .Lart_quick_resolve_string_marking
#endif
    ret

// Slow path case, the index did not match.
.Lart_quick_resolve_string_slow_path:
    SETUP_SAVE_REFS_ONLY_FRAME  // save callee saves in case of GC
    mov x1, xSELF                // pass Thread::Current
    bl artResolveStringFromCode  // (int32_t string_idx, Thread* self)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

// GC is marking case, need to check the mark bit.
.Lart_quick_resolve_string_marking:
    ldr x3, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbnz x3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb
    // Save LR so that we can return, also x1 for alignment purposes.
    SAVE_TWO_REGS_INCREASE_FRAME x1, xLR, 16  // Save x1, LR.
    bl artReadBarrierMark  // Get the marked string back.
    RESTORE_TWO_REGS_DECREASE_FRAME x1, xLR, 16  // Restore registers.
.Lart_quick_resolve_string_no_rb:
    ret

END art_quick_resolve_string
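
// Illustrative C-like sketch of the dex cache probe above (the pair layout is
// inferred from this code: a 64-bit <index:high 32, compressed ref:low 32>
// entry in a power-of-two table):
//
//   uint64_t pair = dex_cache_strings[string_idx & kHashMask];
//   if ((pair >> 32) == string_idx) {
//     String* s = (String*)(uint32_t)pair;          // Low 32 bits: the reference.
//     return gc_is_marking ? mark_if_not_marked(s) : s;
//   }
//   return artResolveStringFromCode(string_idx, self);  // Slow path.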

// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB) implemented in asm
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)

// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
ENTRY art_quick_alloc_object_rosalloc
    // Fast path rosalloc allocation.
    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
    // x2-x7: free.
    ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]  // Load dex cache resolved types array
    // Load the class (x2)
    ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
    cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path  // Check null class
    ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]  // Check if the thread local
                                                           // allocation stack has room.
                                                           // ldp won't work due to large offset.
    ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
    cmp x3, x4
    bhs .Lart_quick_alloc_object_rosalloc_slow_path
    ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (x3)
    cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE  // Check if the size is for a thread
                                                     // local allocation. Also does the
                                                     // finalizable and initialization
                                                     // checks.
    bhs .Lart_quick_alloc_object_rosalloc_slow_path
    // Compute the rosalloc bracket index
    // from the size. Since the size is
    // already aligned we can combine the
    // two shifts together.
    add x4, xSELF, x3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
    // Subtract pointer size since there
    // are no runs for 0 byte allocations
    // and the size is already aligned.
    ldr x4, [x4, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)]
    // Load the free list head (x3). This
    // will be the return val.
    ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
    cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path
    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
    ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET]  // Load the next pointer of the head
                                              // and update the list head with the
                                              // next pointer.
    str x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
    // Store the class pointer in the
    // header. This also overwrites the
    // next pointer. The offsets are
    // asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
    POISON_HEAP_REF w2
    str    w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
                                                              // Fence. This is "ish" not "ishst" so
                                                              // that it also ensures ordering of
                                                              // the object size load with respect
                                                              // to later accesses to the class
                                                              // object. Alternatively we could use
                                                              // "ishst" if we use load-acquire for
                                                              // the class status load.
                                                              // Needs to be done before pushing on
                                                              // allocation since Heap::VisitObjects
                                                              // relies on seeing the class pointer.
                                                              // b/28790624
    dmb    ish
                                                              // Push the new object onto the thread
                                                              // local allocation stack and
                                                              // increment the thread local
                                                              // allocation stack top.
    ldr    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
    str    w3, [x1], #COMPRESSED_REFERENCE_SIZE               // (Increment x1 as a side effect.)
    str    x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
                                                              // Decrement the size of the free list
    ldr    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
    sub    x1, x1, #1
                                                              // TODO: consider combining this store
                                                              // and the list head store above using
                                                              // stp.
    str    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]

    mov    x0, x3                                             // Set the return value and return.
    ret
.Lart_quick_alloc_object_rosalloc_slow_path:
    SETUP_SAVE_REFS_ONLY_FRAME             // save callee saves in case of GC
    mov    x2, xSELF                       // pass Thread::Current
    bl     artAllocObjectFromCodeRosAlloc  // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
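
// The fast path above is, in rough C-like pseudocode (a hedged sketch only;
// `Run`, `Slot` and the field names are illustrative approximations of the
// runtime's RosAlloc structures):
//
//   size_t size = klass->object_size_alloc_fast_path_;
//   if (size >= kMaxThreadLocalBracketSize) return slow_path();
//   // Size is already quantum-aligned, so (size >> kQuantumShift) is the
//   // bracket index; runs_[0] serves the smallest non-zero bracket.
//   Run* run = self->rosalloc_runs_[(size >> kQuantumShift) - 1];
//   Slot* slot = run->free_list_.head_;
//   if (slot == nullptr) return slow_path();
//   run->free_list_.head_ = slot->next_;      // pop the free list
//   ((mirror::Object*)slot)->klass_ = klass;  // reuses the next-pointer word
//   run->free_list_.size_--;
//   // (Plus the push of the new object onto the thread-local allocation
//   // stack, elided here.)
//   return (mirror::Object*)slot;
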

// The common fast path code for art_quick_alloc_array_region_tlab.
.macro ALLOC_ARRAY_TLAB_FAST_PATH slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    // Check null class
    cbz    \wClass, \slowPathLabel
    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED \slowPathLabel, \xClass, \wClass, \xCount, \wCount, \xTemp0, \wTemp0, \xTemp1, \wTemp1, \xTemp2, \wTemp2
.endm

// The common fast path code for art_quick_alloc_array_region_tlab.
.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    // Array classes are never finalizable or uninitialized, no need to check.
    ldr    \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
    UNPOISON_HEAP_REF \wTemp0
    ldr    \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
    lsr    \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
                                                              // bits.
                                                              // xCount is holding a 32 bit value,
                                                              // it can not overflow.
    lsl    \xTemp1, \xCount, \xTemp0                          // Calculate data size
    // Add array data offset and alignment.
    add    \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
#error Long array data offset must be 4 greater than int array data offset.
#endif

    add    \xTemp0, \xTemp0, #1                               // Add 4 to the length only if the
                                                              // component size shift is 3
                                                              // (for 64 bit alignment).
    and    \xTemp0, \xTemp0, #4
    add    \xTemp1, \xTemp1, \xTemp0
    and    \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignment mask
                                                              // (addr + 7) & ~7. The mask must
                                                              // be 64 bits to keep high bits in
                                                              // case of overflow.
    // Negative sized arrays are handled here since xCount holds a zero extended 32 bit value.
    // Negative ints become large 64 bit unsigned ints which will always be larger than max signed
    // 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int.
    cmp    \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD               // Possibly a large object, go slow
    bhs    \slowPathLabel                                     // path.

    ldr    \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET]         // Check tlab for space, note that
                                                              // we use (end - begin) to handle
                                                              // negative size arrays. It is
                                                              // assumed that a negative size will
                                                              // always be greater unsigned than
                                                              // region size.
    ldr    \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
    sub    \xTemp2, \xTemp2, \xTemp0
    cmp    \xTemp1, \xTemp2
    bhi    \slowPathLabel
    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
                                                              // Move old thread_local_pos to x0
                                                              // for the return value.
    mov    x0, \xTemp0
    add    \xTemp0, \xTemp0, \xTemp1
    str    \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET]         // Store new thread_local_pos.
    ldr    \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]     // Increment thread_local_objects.
    add    \xTemp0, \xTemp0, #1
    str    \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
    POISON_HEAP_REF \wClass
    str    \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET]         // Store the class pointer.
    str    \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]         // Store the array length.
                                                              // Fence.
    dmb    ishst
    ret
.endm

// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
//
// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
// x3-x7: free.
// Need to preserve x0 and x1 to the slow path.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
    cbz    x2, \slowPathLabel                                 // Check null class
    ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
.endm

// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
    ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
.endm

.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
    ldr    x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
    ldr    x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
    ldr    w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (x7).
    add    x6, x4, x7                                         // Add object size to tlab pos.
    cmp    x6, x5                                             // Check if it fits, overflow works
                                                              // since the tlab pos and end are 32
                                                              // bit values.
    bhi    \slowPathLabel
    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
    mov    x0, x4
    str    x6, [xSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
    ldr    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
    add    x5, x5, #1
    str    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
    POISON_HEAP_REF w2
    str    w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
                                                              // Fence. This is "ish" not "ishst" so
                                                              // that the code after this allocation
                                                              // site will see the right values in
                                                              // the fields of the class.
                                                              // Alternatively we could use "ishst"
                                                              // if we use load-acquire for the
                                                              // object size load.
    dmb    ish
    ret
.endm
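
// Both fast paths above are thread-local bump-pointer allocations. In rough
// C-like pseudocode (a hedged sketch only; the field names are illustrative):
//
//   size_t size = klass->object_size_alloc_fast_path_;
//   // For arrays instead:
//   //   size = align8(data_offset + ((size_t)count << component_size_shift));
//   if (size > self->tlab_end_ - self->tlab_pos_) return slow_path();
//   mirror::Object* obj = (mirror::Object*)self->tlab_pos_;
//   self->tlab_pos_ += size;
//   self->tlab_objects_++;
//   obj->klass_ = klass;
//   dmb(ish);   // publish the class pointer before the object escapes
//   return obj;
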
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
ENTRY art_quick_alloc_object_tlab
    // Fast path tlab allocation.
    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
    // x2-x7: free.
#if defined(USE_READ_BARRIER)
    mvn    x0, xzr                                            // Read barrier not supported here.
    ret                                                       // Return -1.
#endif
    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
                                                              // Load the class (x2)
    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
    ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
    SETUP_SAVE_REFS_ONLY_FRAME        // Save callee saves in case of GC.
    mov    x2, xSELF                  // Pass Thread::Current.
    bl     artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab

// The common code for art_quick_alloc_object_*region_tlab
.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier
ENTRY \name
    // Fast path region tlab allocation.
    // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
    // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index.
    // x2-x7: free.
#if !defined(USE_READ_BARRIER)
    mvn    x0, xzr                                            // Read barrier must be enabled here.
    ret                                                       // Return -1.
#endif
.if \is_resolved
    mov    x2, x0                                 // class is actually stored in x0 already
.else
    ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
                                                              // Load the class (x2)
    ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
    // If the class is null, go slow path. The check is required to read the lock word.
    cbz    w2, .Lslow_path\name
.endif
.if \read_barrier
    // Most common case: GC is not marking.
    ldr    w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
    cbnz   x3, .Lmarking\name
.endif
.Ldo_allocation\name:
    \fast_path .Lslow_path\name
.Lmarking\name:
.if \read_barrier
    // GC is marking, check the lock word of the class for the mark bit.
    // Class is not null, check mark bit in lock word.
    ldr    w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    // If the bit is not zero, do the allocation.
    tbnz   w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
                                                              // The read barrier slow path. Mark
                                                              // the class.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32                   // Save registers (x0, x1, lr).
    SAVE_REG xLR, 24                                          // The 32-byte frame keeps sp 16-byte
                                                              // aligned; [sp, #16] is padding.
    mov    x0, x2                                             // Pass the class as the first param.
    bl     artReadBarrierMark
    mov    x2, x0                                             // Get the (marked) class back.
    RESTORE_REG xLR, 24
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32                // Restore registers.
    b      .Ldo_allocation\name
.endif
.Lslow_path\name:
    SETUP_SAVE_REFS_ONLY_FRAME        // Save callee saves in case of GC.
    mov    x2, xSELF                  // Pass Thread::Current.
    bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm
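
// The read barrier check in the macro above is, in rough C-like pseudocode
// (a hedged sketch only; the names are illustrative):
//
//   if (self->is_gc_marking_) {
//     // The concurrent copying GC may move the class; a class whose lock
//     // word has the mark bit set is already marked/forwarded.
//     if (!(klass->lock_word_ & (1u << kMarkBitShift))) {
//       klass = artReadBarrierMark(klass);  // mark (and possibly forward) it
//     }
//   }
//   // ... then run the TLAB fast path with `klass`.
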
// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in
// GENERATE_ALLOC_OBJECT_REGION_TLAB.
GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1
// No read barrier for the resolved or initialized cases since the caller is responsible for the
// read barrier due to the to-space invariant.
GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0
GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0

// TODO: We could use this macro for the normal tlab allocator too.

// The common code for art_quick_alloc_array_*region_tlab
.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved
ENTRY \name
    // Fast path array allocation for region tlab allocation.
    // x0: uint32_t type_idx
    // x1: int32_t component_count
    // x2: ArtMethod* method
    // x3-x7: free.
#if !defined(USE_READ_BARRIER)
    mvn    x0, xzr                                            // Read barrier must be enabled here.
    ret                                                       // Return -1.
#endif
.if \is_resolved
    mov    x3, x0
    // If already resolved, class is stored in x0
.else
    ldr    x3, [x2, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
                                                              // Load the class (x3)
    ldr    w3, [x3, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
.endif
    // Most common case: GC is not marking.
    ldr    w4, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
    cbnz   x4, .Lmarking\name
.Ldo_allocation\name:
    \fast_path .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
.Lmarking\name:
    // GC is marking, check the lock word of the class for the mark bit.
    // If the class is null, go slow path. The check is required to read the lock word.
    cbz    w3, .Lslow_path\name
    // Class is not null, check mark bit in lock word.
    ldr    w4, [x3, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    // If the bit is not zero, do the allocation.
    tbnz   w4, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
                                                              // The read barrier slow path. Mark
                                                              // the class.
    stp    x0, x1, [sp, #-32]!                                // Save registers (x0, x1, x2, lr).
    stp    x2, xLR, [sp, #16]
    mov    x0, x3                                             // Pass the class as the first param.
    bl     artReadBarrierMark
    mov    x3, x0                                             // Get the (marked) class back.
    ldp    x2, xLR, [sp, #16]
    ldp    x0, x1, [sp], #32                                  // Restore registers.
    b      .Ldo_allocation\name
.Lslow_path\name:
    // x0: uint32_t type_idx / mirror::Class* klass (if resolved)
    // x1: int32_t component_count
    // x2: ArtMethod* method
    // x3: Thread* self
    SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case of GC
    mov    x3, xSELF                  // pass Thread::Current
    bl     \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm

GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_region_tlab, artAllocArrayFromCodeRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH, 0
// TODO: art_quick_alloc_array_resolved_region_tlab seems to not get called. Investigate compiler.
GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, 1

    /*
     * Called by managed code when the thread has been asked to suspend.
     */
    .extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
    SETUP_SAVE_EVERYTHING_FRAME       // save callee saves for stack crawl
    mov    x0, xSELF
    bl     artTestSuspendFromCode     // (Thread*)
    RESTORE_SAVE_EVERYTHING_FRAME
    ret
END art_quick_test_suspend

ENTRY art_quick_implicit_suspend
    mov    x0, xSELF
    SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves for stack crawl
    bl     artTestSuspendFromCode     // (Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
END art_quick_implicit_suspend

    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * x0 holds the proxy method and x1 holds the receiver; The frame size of the invoked proxy
     * method agrees with a ref and args callee save frame.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
    mov    x2, xSELF                  // pass Thread::Current
    mov    x3, sp                     // pass SP
    bl     artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
    ldr    x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz   x2, .Lexception_in_proxy   // branch if an exception is pending
    RESTORE_SAVE_REFS_AND_ARGS_FRAME  // Restore frame
    fmov   d0, x0                     // Store result in d0 in case it was float or double
    ret                               // return on success
.Lexception_in_proxy:
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler

    /*
     * Called to resolve an imt conflict.
     * x0 is the conflict ArtMethod.
     * xIP1 is a hidden argument that holds the target interface method's dex method index.
     *
     * Note that this stub writes to xIP0, xIP1, and x0.
     */
    .extern artInvokeInterfaceTrampoline
ENTRY art_quick_imt_conflict_trampoline
    ldr    xIP0, [sp, #0]             // Load referrer
    ldr    xIP0, [xIP0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_64]  // Load dex cache methods array
    ldr    xIP0, [xIP0, xIP1, lsl #POINTER_SIZE_SHIFT]            // Load interface method
    ldr    xIP1, [x0, #ART_METHOD_JNI_OFFSET_64]                  // Load ImtConflictTable
    ldr    x0, [xIP1]                 // Load first entry in ImtConflictTable.
.Limt_table_iterate:
    cmp    x0, xIP0
    // Branch if found. Benchmarks have shown doing a branch here is better.
    beq    .Limt_table_found
    // If the entry is null, the interface method is not in the ImtConflictTable.
    cbz    x0, .Lconflict_trampoline
    // Iterate over the entries of the ImtConflictTable.
    ldr    x0, [xIP1, #(2 * __SIZEOF_POINTER__)]!
    b      .Limt_table_iterate
.Limt_table_found:
    // We successfully hit an entry in the table. Load the target method
    // and jump to it.
    ldr    x0, [xIP1, #__SIZEOF_POINTER__]
    ldr    xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    br     xIP0
.Lconflict_trampoline:
    // Call the runtime stub to populate the ImtConflictTable and jump to the
    // resolved method.
    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
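
// The table walk above is, in rough C-like pseudocode (a hedged sketch only;
// the layout of ImtConflictTable as null-terminated {interface_method,
// implementation} pointer pairs is what the loads above assume):
//
//   void** entry = (void**)conflict_method->jni_entrypoint_;  // really the table
//   while (*entry != nullptr) {
//     if (*entry == interface_method) {
//       ArtMethod* target = (ArtMethod*)entry[1];
//       goto *target->quick_code_;   // tail-call, no frame is built
//     }
//     entry += 2;                    // advance to the next pair
//   }
//   return artInvokeInterfaceTrampoline(...);  // populate the table, resolve
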

ENTRY art_quick_resolution_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    mov    x2, xSELF
    mov    x3, sp
    bl     artQuickResolutionTrampoline  // (called, receiver, Thread*, SP)
    cbz    x0, 1f
    mov    xIP0, x0                   // Remember returned code pointer in xIP0.
    ldr    x0, [sp, #0]               // artQuickResolutionTrampoline puts called method in *SP.
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    br     xIP0
1:
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline

/*
 * Generic JNI frame layout:
 *
 * #-------------------#
 * |                   |
 * |  caller method... |
 * #-------------------#    <--- SP on entry
 * | Return X30/LR     |
 * | X29/FP            |    callee save
 * | X28               |    callee save
 * | X27               |    callee save
 * | X26               |    callee save
 * | X25               |    callee save
 * | X24               |    callee save
 * | X23               |    callee save
 * | X22               |    callee save
 * | X21               |    callee save
 * | X20               |    callee save
 * | X19               |    callee save
 * | X7                |    arg7
 * | X6                |    arg6
 * | X5                |    arg5
 * | X4                |    arg4
 * | X3                |    arg3
 * | X2                |    arg2
 * | X1                |    arg1
 * | D7                |    float arg 8
 * | D6                |    float arg 7
 * | D5                |    float arg 6
 * | D4                |    float arg 5
 * | D3                |    float arg 4
 * | D2                |    float arg 3
 * | D1                |    float arg 2
 * | D0                |    float arg 1
 * | Method*           | <- X0
 * #-------------------#
 * | local ref cookie  | // 4B
 * | handle scope size | // 4B
 * #-------------------#
 * | JNI Call Stack    |
 * #-------------------#    <--- SP on native call
 * |                   |
 * | Stack for Regs    |    The trampoline assembly will pop these values
 * |                   |    into registers for native call
 * #-------------------#
 * | Native code ptr   |
 * #-------------------#
 * | Free scratch      |
 * #-------------------#
 * | Ptr to (1)        |    <--- SP
 * #-------------------#
 */
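    /*
     * Rough C-like sketch of the down-call below (hedged; this compresses the
     * real control flow, and the names mirror the runtime calls used below):
     *
     *   void* code = artQuickGenericJniTrampoline(self, sp); // builds handle
     *   if (code == nullptr) goto deliver_exception;         // scope and args
     *   // reload x0-x7 / d0-d7 from the prepared area, then:
     *   uint64_t gpr_result = call_native(code, args...);
     *   uint64_t result = artQuickGenericJniEndTrampoline(self, gpr_result, fpr_result);
     *   if (self->exception_ != nullptr) goto deliver_exception;
     *   return result;                                       // also copied to d0
     */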
    /*
     * Called to do a generic JNI down-call
     */
ENTRY art_quick_generic_jni_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0

    // Save SP, so we can have static CFI info.
    mov    x28, sp
    .cfi_def_cfa_register x28

    // This looks the same, but is different: this will be updated to point to the bottom
    // of the frame when the handle scope is inserted.
    mov    xFP, sp

    mov    xIP0, #5120                // Reserve space for the handle scope and native call frame.
    sub    sp, sp, xIP0

    // prepare for artQuickGenericJniTrampoline call
    // (Thread*, SP)
    //    x0    x1   <= C calling convention
    //  xSELF  xFP   <= where they are

    mov    x0, xSELF                  // Thread*
    mov    x1, xFP
    bl     artQuickGenericJniTrampoline  // (Thread*, sp)

    // The C call will have registered the complete save-frame on success.
    // The result of the call is:
    // x0: pointer to native code, 0 on error.
    // x1: pointer to the bottom of the used area of the alloca, can restore stack till there.

    // Check for error = 0.
    cbz    x0, .Lexception_in_native

    // Release part of the alloca.
    mov    sp, x1

    // Save the code pointer
    mov    xIP0, x0

    // Load parameters from frame into registers.
    // TODO Check with artQuickGenericJniTrampoline.
    // Also, check again AAPCS64 - the stack arguments are interleaved.
    ldp    x0, x1, [sp]
    ldp    x2, x3, [sp, #16]
    ldp    x4, x5, [sp, #32]
    ldp    x6, x7, [sp, #48]

    ldp    d0, d1, [sp, #64]
    ldp    d2, d3, [sp, #80]
    ldp    d4, d5, [sp, #96]
    ldp    d6, d7, [sp, #112]

    add    sp, sp, #128

    blr    xIP0                       // native call.

    // result sign extension is handled in C code
    // prepare for artQuickGenericJniEndTrampoline call
    // (Thread*, result, result_f)
    //   x0      x1      x2          <= C calling convention
    mov    x1, x0                     // Result (from saved).
    mov    x0, xSELF                  // Thread register.
    fmov   x2, d0                     // d0 will contain floating point result, but needs to go into x2

    bl     artQuickGenericJniEndTrampoline

    // Pending exceptions possible.
    ldr    x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz   x2, .Lexception_in_native

    // Tear down the alloca.
    mov    sp, x28
    .cfi_def_cfa_register sp

    // Tear down the callee-save frame.
    RESTORE_SAVE_REFS_AND_ARGS_FRAME

    // store into fpr, for when it's a fpr return...
    fmov   d0, x0
    ret

.Lexception_in_native:
    // Move to x1 then sp to please assembler.
    ldr    x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
    mov    sp, x1
    .cfi_def_cfa_register sp
    // This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline

/*
 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those
 * of a quick call:
 * x0 = method being called/to bridge to.
 * x1..x7, d0..d7 = arguments to that method.
 */
ENTRY art_quick_to_interpreter_bridge
    SETUP_SAVE_REFS_AND_ARGS_FRAME    // Set up frame and save arguments.

    // x0 will contain mirror::ArtMethod* method.
    mov    x1, xSELF                  // Pass Thread::Current.
    mov    x2, sp

    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
    //                                      mirror::ArtMethod** sp)
    bl     artQuickToInterpreterBridge

    RESTORE_SAVE_REFS_AND_ARGS_FRAME  // TODO: no need to restore arguments in this case.

    fmov   d0, x0

    RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge


//
// Instrumentation-related stubs
//
    .extern artInstrumentationMethodEntryFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_SAVE_REFS_AND_ARGS_FRAME

    mov    x20, x0                    // Preserve method reference in a callee-save.

    mov    x2, xSELF
    mov    x3, xLR
    bl     artInstrumentationMethodEntryFromCode  // (Method*, Object*, Thread*, LR)

    mov    xIP0, x0                   // x0 = result of call.
    mov    x0, x20                    // Reload method reference.

    RESTORE_SAVE_REFS_AND_ARGS_FRAME  // Note: will restore xSELF
    adr    xLR, art_quick_instrumentation_exit
    br     xIP0                       // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry

    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_exit
    mov    xLR, #0                    // Clobber LR for later checks.

    SETUP_SAVE_REFS_ONLY_FRAME

    // We need to save x0 and d0. We could use a callee-save from SETUP_REF_ONLY, but then
    // we would need to fully restore it. As there are a lot of callee-save registers, it seems
    // easier to have an extra small stack area.

    str    x0, [sp, #-16]!            // Save integer result.
    .cfi_adjust_cfa_offset 16
    str    d0, [sp, #8]               // Save floating-point result.

    add    x1, sp, #16                // Pass SP.
    mov    x2, x0                     // Pass integer result.
    fmov   x3, d0                     // Pass floating-point result.
    mov    x0, xSELF                  // Pass Thread.
    bl     artInstrumentationMethodExitFromCode   // (Thread*, SP, gpr_res, fpr_res)

    mov    xIP0, x0                   // Return address from instrumentation call.
    mov    xLR, x1                    // x1 holds the link register if we are to deoptimize.

    ldr    d0, [sp, #8]               // Restore floating-point result.
    ldr    x0, [sp], #16              // Restore integer result, and drop stack area.
    .cfi_adjust_cfa_offset -16

    POP_SAVE_REFS_ONLY_FRAME

    br     xIP0                       // Tail-call out.
END art_quick_instrumentation_exit

    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    mov    x0, xSELF                  // Pass thread.
    bl     artDeoptimize              // artDeoptimize(Thread*)
    brk 0
END art_quick_deoptimize

    /*
     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    SETUP_SAVE_EVERYTHING_FRAME
    mov    x0, xSELF                  // Pass thread.
    bl     artDeoptimizeFromCompiledCode  // artDeoptimizeFromCompiledCode(Thread*)
    brk 0
END art_quick_deoptimize_from_compiled_code


    /*
     * String's indexOf.
     *
     * TODO: Not very optimized.
     * On entry:
     *    x0:   string object (known non-null)
     *    w1:   char to match (known <= 0xFFFF)
     *    w2:   Starting offset in string data
     */
ENTRY art_quick_indexof
    ldr    w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
    add    x0, x0, #MIRROR_STRING_VALUE_OFFSET

    /* Clamp start to [0..count] */
    cmp    w2, #0
    csel   w2, wzr, w2, lt
    cmp    w2, w3
    csel   w2, w3, w2, gt

    /* Save a copy to compute result */
    mov    x5, x0

    /* Build pointer to start of data to compare and pre-bias */
    add    x0, x0, x2, lsl #1
    sub    x0, x0, #2

    /* Compute iteration count */
    sub    w2, w3, w2

    /*
     * At this point we have:
     *   x0: start of the data to test
     *   w1: char to compare
     *   w2: iteration count
     *   x5: original start of string data
     */

    subs   w2, w2, #4
    b.lt   .Lindexof_remainder

.Lindexof_loop4:
    ldrh   w6, [x0, #2]!
    ldrh   w7, [x0, #2]!
    ldrh   wIP0, [x0, #2]!
    ldrh   wIP1, [x0, #2]!
    cmp    w6, w1
    b.eq   .Lmatch_0
    cmp    w7, w1
    b.eq   .Lmatch_1
    cmp    wIP0, w1
    b.eq   .Lmatch_2
    cmp    wIP1, w1
    b.eq   .Lmatch_3
    subs   w2, w2, #4
    b.ge   .Lindexof_loop4

.Lindexof_remainder:
    adds   w2, w2, #4
    b.eq   .Lindexof_nomatch

.Lindexof_loop1:
    ldrh   w6, [x0, #2]!
    cmp    w6, w1
    b.eq   .Lmatch_3
    subs   w2, w2, #1
    b.ne   .Lindexof_loop1

.Lindexof_nomatch:
    mov    x0, #-1
    ret

.Lmatch_0:
    sub    x0, x0, #6
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
.Lmatch_1:
    sub    x0, x0, #4
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
.Lmatch_2:
    sub    x0, x0, #2
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
.Lmatch_3:
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
END art_quick_indexof
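
// The stub above is, in rough C-like pseudocode (a hedged sketch; the 4x loop
// unrolling and the pre-biased data pointer are the only asm-specific tricks):
//
//   int32_t indexof(mirror::String* s, uint16_t ch, int32_t start) {
//     int32_t count = s->count_;
//     uint16_t* data = s->value_;
//     if (start < 0) start = 0;          // clamp start to [0..count]
//     if (start > count) start = count;
//     for (int32_t i = start; i < count; ++i) {
//       if (data[i] == ch) return i;
//     }
//     return -1;
//   }
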

    /*
     * Create a function `name` calling the ReadBarrier::Mark routine,
     * getting its argument and returning its result through W register
     * `wreg` (corresponding to X register `xreg`), saving and restoring
     * all caller-save registers.
     *
     * If `wreg` is different from `w0`, the generated function follows a
     * non-standard runtime calling convention:
     * - register `wreg` is used to pass the (sole) argument of this
     *   function (instead of W0);
     * - register `wreg` is used to return the result of this function
     *   (instead of W0);
     * - W0 is treated like a normal (non-argument) caller-save register;
     * - everything else is the same as in the standard runtime calling
     *   convention (e.g. standard callee-save registers are preserved).
     */
.macro READ_BARRIER_MARK_REG name, wreg, xreg
ENTRY \name
    // Reference is null, no work to do at all.
    cbz    \wreg, .Lret_rb_\name
    /*
     * Allocate 46 stack slots * 8 = 368 bytes:
     * - 20 slots for core registers X0-X19
     * - 24 slots for floating-point registers D0-D7 and D16-D31
     * - 1 slot for return address register XLR
     * - 1 padding slot for 16-byte stack alignment
     */
    // Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
    ldr    wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbz    wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_path_rb_\name
    ret
.Lslow_path_rb_\name:
    // Save all potentially live caller-save core registers.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368
    SAVE_TWO_REGS  x2,  x3, 16
    SAVE_TWO_REGS  x4,  x5, 32
    SAVE_TWO_REGS  x6,  x7, 48
    SAVE_TWO_REGS  x8,  x9, 64
    SAVE_TWO_REGS x10, x11, 80
    SAVE_TWO_REGS x12, x13, 96
    SAVE_TWO_REGS x14, x15, 112
    SAVE_TWO_REGS x16, x17, 128
    SAVE_TWO_REGS x18, x19, 144
    // Save all potentially live caller-save floating-point registers.
    stp    d0, d1,   [sp, #160]
    stp    d2, d3,   [sp, #176]
    stp    d4, d5,   [sp, #192]
    stp    d6, d7,   [sp, #208]
    stp    d16, d17, [sp, #224]
    stp    d18, d19, [sp, #240]
    stp    d20, d21, [sp, #256]
    stp    d22, d23, [sp, #272]
    stp    d24, d25, [sp, #288]
    stp    d26, d27, [sp, #304]
    stp    d28, d29, [sp, #320]
    stp    d30, d31, [sp, #336]
    // Save return address.
    // (sp + #352 is a padding slot)
    SAVE_REG xLR, 360

    .ifnc \wreg, w0
      mov  w0, \wreg                  // Pass arg1 - obj from `wreg`
    .endif
    bl     artReadBarrierMark         // artReadBarrierMark(obj)
    .ifnc \wreg, w0
      mov  \wreg, w0                  // Return result into `wreg`
    .endif

    // Restore core regs, except `xreg`, as `wreg` is used to return the
    // result of this function (simply remove it from the stack instead).
    POP_REGS_NE x0, x1,   0,   \xreg
    POP_REGS_NE x2, x3,   16,  \xreg
    POP_REGS_NE x4, x5,   32,  \xreg
    POP_REGS_NE x6, x7,   48,  \xreg
    POP_REGS_NE x8, x9,   64,  \xreg
    POP_REGS_NE x10, x11, 80,  \xreg
    POP_REGS_NE x12, x13, 96,  \xreg
    POP_REGS_NE x14, x15, 112, \xreg
    POP_REGS_NE x16, x17, 128, \xreg
    POP_REGS_NE x18, x19, 144, \xreg
    // Restore floating-point registers.
    ldp    d0, d1,   [sp, #160]
    ldp    d2, d3,   [sp, #176]
    ldp    d4, d5,   [sp, #192]
    ldp    d6, d7,   [sp, #208]
    ldp    d16, d17, [sp, #224]
    ldp    d18, d19, [sp, #240]
    ldp    d20, d21, [sp, #256]
    ldp    d22, d23, [sp, #272]
    ldp    d24, d25, [sp, #288]
    ldp    d26, d27, [sp, #304]
    ldp    d28, d29, [sp, #320]
    ldp    d30, d31, [sp, #336]
    // Restore return address and remove padding.
    RESTORE_REG xLR, 360
    DECREASE_FRAME 368
.Lret_rb_\name:
    ret
END \name
.endm

READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg00, w0,  x0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, w1,  x1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, w2,  x2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, w3,  x3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, w4,  x4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, w5,  x5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, w6,  x6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, w7,  x7
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, w8,  x8
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, w9,  x9
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, w10, x10
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, w11, x11
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, w12, x12
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, w13, x13
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, w14, x14
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, w15, x15
// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, w16, x16 ip0 is blocked
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, w17, x17
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, w19, x19
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, w20, x20
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, w21, x21
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, w22, x22
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg23, w23, x23
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg24, w24, x24
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg25, w25, x25
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg26, w26, x26
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, w27, x27
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, w28, x28
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29