quick_entrypoints_arm64.S revision 215076b9f2211f09146d92f6f011fe1787b0b6cd
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "asm_support_arm64.S" 18 19#include "arch/quick_alloc_entrypoints.S" 20 21 22.macro SAVE_REG reg, offset 23 str \reg, [sp, #(\offset)] 24 .cfi_rel_offset \reg, (\offset) 25.endm 26 27.macro RESTORE_REG reg, offset 28 ldr \reg, [sp, #(\offset)] 29 .cfi_restore \reg 30.endm 31 32.macro SAVE_TWO_REGS reg1, reg2, offset 33 stp \reg1, \reg2, [sp, #(\offset)] 34 .cfi_rel_offset \reg1, (\offset) 35 .cfi_rel_offset \reg2, (\offset) + 8 36.endm 37 38.macro RESTORE_TWO_REGS reg1, reg2, offset 39 ldp \reg1, \reg2, [sp, #(\offset)] 40 .cfi_restore \reg1 41 .cfi_restore \reg2 42.endm 43 44.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment 45 stp \reg1, \reg2, [sp, #-(\frame_adjustment)]! 46 .cfi_adjust_cfa_offset (\frame_adjustment) 47 .cfi_rel_offset \reg1, 0 48 .cfi_rel_offset \reg2, 8 49.endm 50 51.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment 52 ldp \reg1, \reg2, [sp], #(\frame_adjustment) 53 .cfi_restore \reg1 54 .cfi_restore \reg2 55 .cfi_adjust_cfa_offset -(\frame_adjustment) 56.endm 57 58 /* 59 * Macro that sets up the callee save frame to conform with 60 * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves) 61 */ 62.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME 63 // art::Runtime** xIP0 = &art::Runtime::instance_ 64 adrp xIP0, :got:_ZN3art7Runtime9instance_E 65 ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E] 66 67 // Our registers aren't intermixed - just spill in order. 68 ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_; 69 70 // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves]; 71 ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET] 72 73 sub sp, sp, #176 74 .cfi_adjust_cfa_offset 176 75 76 // Ugly compile-time check, but we only have the preprocessor. 77#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 176) 78#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM64) size not as expected." 79#endif 80 81 // Stack alignment filler [sp, #8]. 82 // FP callee-saves. 83 stp d8, d9, [sp, #16] 84 stp d10, d11, [sp, #32] 85 stp d12, d13, [sp, #48] 86 stp d14, d15, [sp, #64] 87 88 // GP callee-saves 89 SAVE_TWO_REGS x19, x20, 80 90 SAVE_TWO_REGS x21, x22, 96 91 SAVE_TWO_REGS x23, x24, 112 92 SAVE_TWO_REGS x25, x26, 128 93 SAVE_TWO_REGS x27, x28, 144 94 SAVE_TWO_REGS x29, xLR, 160 95 96 // Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves]. 97 str xIP0, [sp] 98 // Place sp in Thread::Current()->top_quick_frame. 99 mov xIP0, sp 100 str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] 101.endm 102 103 /* 104 * Macro that sets up the callee save frame to conform with 105 * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). 106 */ 107.macro SETUP_SAVE_REFS_ONLY_FRAME 108 // art::Runtime** xIP0 = &art::Runtime::instance_ 109 adrp xIP0, :got:_ZN3art7Runtime9instance_E 110 ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E] 111 112 // Our registers aren't intermixed - just spill in order. 113 ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_; 114 115 // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefOnly]; 116 ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET] 117 118 sub sp, sp, #96 119 .cfi_adjust_cfa_offset 96 120 121 // Ugly compile-time check, but we only have the preprocessor. 122#if (FRAME_SIZE_SAVE_REFS_ONLY != 96) 123#error "FRAME_SIZE_SAVE_REFS_ONLY(ARM64) size not as expected." 124#endif 125 126 // GP callee-saves. 127 // x20 paired with ArtMethod* - see below. 128 SAVE_TWO_REGS x21, x22, 16 129 SAVE_TWO_REGS x23, x24, 32 130 SAVE_TWO_REGS x25, x26, 48 131 SAVE_TWO_REGS x27, x28, 64 132 SAVE_TWO_REGS x29, xLR, 80 133 134 // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsOnly]. 135 stp xIP0, x20, [sp] 136 .cfi_rel_offset x20, 8 137 138 // Place sp in Thread::Current()->top_quick_frame. 139 mov xIP0, sp 140 str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] 141.endm 142 143// TODO: Probably no need to restore registers preserved by aapcs64. 144.macro RESTORE_SAVE_REFS_ONLY_FRAME 145 // Callee-saves. 146 RESTORE_REG x20, 8 147 RESTORE_TWO_REGS x21, x22, 16 148 RESTORE_TWO_REGS x23, x24, 32 149 RESTORE_TWO_REGS x25, x26, 48 150 RESTORE_TWO_REGS x27, x28, 64 151 RESTORE_TWO_REGS x29, xLR, 80 152 153 add sp, sp, #96 154 .cfi_adjust_cfa_offset -96 155.endm 156 157.macro POP_SAVE_REFS_ONLY_FRAME 158 add sp, sp, #96 159 .cfi_adjust_cfa_offset - 96 160.endm 161 162.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN 163 RESTORE_SAVE_REFS_ONLY_FRAME 164 ret 165.endm 166 167 168.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL 169 sub sp, sp, #224 170 .cfi_adjust_cfa_offset 224 171 172 // Ugly compile-time check, but we only have the preprocessor. 173#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 224) 174#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM64) size not as expected." 175#endif 176 177 // Stack alignment filler [sp, #8]. 178 // FP args. 179 stp d0, d1, [sp, #16] 180 stp d2, d3, [sp, #32] 181 stp d4, d5, [sp, #48] 182 stp d6, d7, [sp, #64] 183 184 // Core args. 185 SAVE_TWO_REGS x1, x2, 80 186 SAVE_TWO_REGS x3, x4, 96 187 SAVE_TWO_REGS x5, x6, 112 188 189 // x7, Callee-saves. 190 SAVE_TWO_REGS x7, x20, 128 191 SAVE_TWO_REGS x21, x22, 144 192 SAVE_TWO_REGS x23, x24, 160 193 SAVE_TWO_REGS x25, x26, 176 194 SAVE_TWO_REGS x27, x28, 192 195 196 // x29(callee-save) and LR. 197 SAVE_TWO_REGS x29, xLR, 208 198 199.endm 200 201 /* 202 * Macro that sets up the callee save frame to conform with 203 * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). 204 * 205 * TODO This is probably too conservative - saving FP & LR. 206 */ 207.macro SETUP_SAVE_REFS_AND_ARGS_FRAME 208 // art::Runtime** xIP0 = &art::Runtime::instance_ 209 adrp xIP0, :got:_ZN3art7Runtime9instance_E 210 ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E] 211 212 // Our registers aren't intermixed - just spill in order. 213 ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_; 214 215 // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefAndArgs]; 216 ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET] 217 218 SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL 219 220 str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsAndArgs]. 221 // Place sp in Thread::Current()->top_quick_frame. 222 mov xIP0, sp 223 str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] 224.endm 225 226.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0 227 SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL 228 str x0, [sp, #0] // Store ArtMethod* to bottom of stack. 229 // Place sp in Thread::Current()->top_quick_frame. 230 mov xIP0, sp 231 str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] 232.endm 233 234// TODO: Probably no need to restore registers preserved by aapcs64. 235.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME 236 // FP args. 237 ldp d0, d1, [sp, #16] 238 ldp d2, d3, [sp, #32] 239 ldp d4, d5, [sp, #48] 240 ldp d6, d7, [sp, #64] 241 242 // Core args. 243 RESTORE_TWO_REGS x1, x2, 80 244 RESTORE_TWO_REGS x3, x4, 96 245 RESTORE_TWO_REGS x5, x6, 112 246 247 // x7, Callee-saves. 248 RESTORE_TWO_REGS x7, x20, 128 249 RESTORE_TWO_REGS x21, x22, 144 250 RESTORE_TWO_REGS x23, x24, 160 251 RESTORE_TWO_REGS x25, x26, 176 252 RESTORE_TWO_REGS x27, x28, 192 253 254 // x29(callee-save) and LR. 255 RESTORE_TWO_REGS x29, xLR, 208 256 257 add sp, sp, #224 258 .cfi_adjust_cfa_offset -224 259.endm 260 261 /* 262 * Macro that sets up the callee save frame to conform with 263 * Runtime::CreateCalleeSaveMethod(kSaveEverything) 264 */ 265.macro SETUP_SAVE_EVERYTHING_FRAME 266 sub sp, sp, #512 267 .cfi_adjust_cfa_offset 512 268 269 // Ugly compile-time check, but we only have the preprocessor. 270#if (FRAME_SIZE_SAVE_EVERYTHING != 512) 271#error "FRAME_SIZE_SAVE_EVERYTHING(ARM64) size not as expected." 272#endif 273 274 // Save FP registers. 275 // For better performance, store d0 and d31 separately, so that all STPs are 16-byte aligned. 276 str d0, [sp, #8] 277 stp d1, d2, [sp, #16] 278 stp d3, d4, [sp, #32] 279 stp d5, d6, [sp, #48] 280 stp d7, d8, [sp, #64] 281 stp d9, d10, [sp, #80] 282 stp d11, d12, [sp, #96] 283 stp d13, d14, [sp, #112] 284 stp d15, d16, [sp, #128] 285 stp d17, d18, [sp, #144] 286 stp d19, d20, [sp, #160] 287 stp d21, d22, [sp, #176] 288 stp d23, d24, [sp, #192] 289 stp d25, d26, [sp, #208] 290 stp d27, d28, [sp, #224] 291 stp d29, d30, [sp, #240] 292 str d31, [sp, #256] 293 294 // Save core registers. 295 SAVE_REG x0, 264 296 SAVE_TWO_REGS x1, x2, 272 297 SAVE_TWO_REGS x3, x4, 288 298 SAVE_TWO_REGS x5, x6, 304 299 SAVE_TWO_REGS x7, x8, 320 300 SAVE_TWO_REGS x9, x10, 336 301 SAVE_TWO_REGS x11, x12, 352 302 SAVE_TWO_REGS x13, x14, 368 303 SAVE_TWO_REGS x15, x16, 384 304 SAVE_TWO_REGS x17, x18, 400 305 SAVE_TWO_REGS x19, x20, 416 306 SAVE_TWO_REGS x21, x22, 432 307 SAVE_TWO_REGS x23, x24, 448 308 SAVE_TWO_REGS x25, x26, 464 309 SAVE_TWO_REGS x27, x28, 480 310 SAVE_TWO_REGS x29, xLR, 496 311 312 // art::Runtime** xIP0 = &art::Runtime::instance_ 313 adrp xIP0, :got:_ZN3art7Runtime9instance_E 314 ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E] 315 316 ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_; 317 318 // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything]; 319 ldr xIP0, [xIP0, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET] 320 321 // Store ArtMethod* Runtime::callee_save_methods_[kSaveEverything]. 322 str xIP0, [sp] 323 // Place sp in Thread::Current()->top_quick_frame. 324 mov xIP0, sp 325 str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] 326.endm 327 328.macro RESTORE_SAVE_EVERYTHING_FRAME 329 // Restore FP registers. 330 // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned. 331 ldr d0, [sp, #8] 332 ldp d1, d2, [sp, #16] 333 ldp d3, d4, [sp, #32] 334 ldp d5, d6, [sp, #48] 335 ldp d7, d8, [sp, #64] 336 ldp d9, d10, [sp, #80] 337 ldp d11, d12, [sp, #96] 338 ldp d13, d14, [sp, #112] 339 ldp d15, d16, [sp, #128] 340 ldp d17, d18, [sp, #144] 341 ldp d19, d20, [sp, #160] 342 ldp d21, d22, [sp, #176] 343 ldp d23, d24, [sp, #192] 344 ldp d25, d26, [sp, #208] 345 ldp d27, d28, [sp, #224] 346 ldp d29, d30, [sp, #240] 347 ldr d31, [sp, #256] 348 349 // Restore core registers. 350 RESTORE_REG x0, 264 351 RESTORE_TWO_REGS x1, x2, 272 352 RESTORE_TWO_REGS x3, x4, 288 353 RESTORE_TWO_REGS x5, x6, 304 354 RESTORE_TWO_REGS x7, x8, 320 355 RESTORE_TWO_REGS x9, x10, 336 356 RESTORE_TWO_REGS x11, x12, 352 357 RESTORE_TWO_REGS x13, x14, 368 358 RESTORE_TWO_REGS x15, x16, 384 359 RESTORE_TWO_REGS x17, x18, 400 360 RESTORE_TWO_REGS x19, x20, 416 361 RESTORE_TWO_REGS x21, x22, 432 362 RESTORE_TWO_REGS x23, x24, 448 363 RESTORE_TWO_REGS x25, x26, 464 364 RESTORE_TWO_REGS x27, x28, 480 365 RESTORE_TWO_REGS x29, xLR, 496 366 367 add sp, sp, #512 368 .cfi_adjust_cfa_offset -512 369.endm 370 371.macro RETURN_IF_RESULT_IS_ZERO 372 cbnz x0, 1f // result non-zero branch over 373 ret // return 3741: 375.endm 376 377.macro RETURN_IF_RESULT_IS_NON_ZERO 378 cbz x0, 1f // result zero branch over 379 ret // return 3801: 381.endm 382 383 /* 384 * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending 385 * exception is Thread::Current()->exception_ 386 */ 387.macro DELIVER_PENDING_EXCEPTION 388 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME 389 mov x0, xSELF 390 391 // Point of no return. 392 b artDeliverPendingExceptionFromCode // artDeliverPendingExceptionFromCode(Thread*) 393 brk 0 // Unreached 394.endm 395 396.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg 397 ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field. 398 cbnz \reg, 1f 399 ret 4001: 401 DELIVER_PENDING_EXCEPTION 402.endm 403 404.macro RETURN_OR_DELIVER_PENDING_EXCEPTION 405 RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0 406.endm 407 408// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register. 409.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 410 RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1 411.endm 412 413.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER 414 cbnz w0, 1f // result non-zero branch over 415 ret // return 4161: 417 DELIVER_PENDING_EXCEPTION 418.endm 419 420.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name 421 .extern \cxx_name 422ENTRY \c_name 423 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context 424 mov x0, xSELF // pass Thread::Current 425 b \cxx_name // \cxx_name(Thread*) 426END \c_name 427.endm 428 429.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name 430 .extern \cxx_name 431ENTRY \c_name 432 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context. 433 mov x1, xSELF // pass Thread::Current. 434 b \cxx_name // \cxx_name(arg, Thread*). 435 brk 0 436END \c_name 437.endm 438 439.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name 440 .extern \cxx_name 441ENTRY \c_name 442 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context 443 mov x2, xSELF // pass Thread::Current 444 b \cxx_name // \cxx_name(arg1, arg2, Thread*) 445 brk 0 446END \c_name 447.endm 448 449 /* 450 * Called by managed code, saves callee saves and then calls artThrowException 451 * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. 452 */ 453ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode 454 455 /* 456 * Called by managed code to create and deliver a NullPointerException. 457 */ 458NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode 459 460 /* 461 * Call installed by a signal handler to create and deliver a NullPointerException. 462 */ 463ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal 464 465 /* 466 * Called by managed code to create and deliver an ArithmeticException. 467 */ 468NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode 469 470 /* 471 * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds 472 * index, arg2 holds limit. 473 */ 474TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode 475 476 /* 477 * Called by managed code to create and deliver a StringIndexOutOfBoundsException 478 * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit. 479 */ 480TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode 481 482 /* 483 * Called by managed code to create and deliver a StackOverflowError. 484 */ 485NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode 486 487 /* 488 * Called by managed code to create and deliver a NoSuchMethodError. 489 */ 490ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode 491 492 /* 493 * All generated callsites for interface invokes and invocation slow paths will load arguments 494 * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain 495 * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper. 496 * NOTE: "this" is first visible argument of the target, and so can be found in arg1/x1. 497 * 498 * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting 499 * of the target Method* in x0 and method->code_ in x1. 500 * 501 * If unsuccessful, the helper will return null/????. There will be a pending exception in the 502 * thread and we branch to another stub to deliver it. 503 * 504 * On success this wrapper will restore arguments and *jump* to the target, leaving the lr 505 * pointing back to the original caller. 506 * 507 * Adapted from ARM32 code. 508 * 509 * Clobbers xIP0. 510 */ 511.macro INVOKE_TRAMPOLINE_BODY cxx_name 512 .extern \cxx_name 513 SETUP_SAVE_REFS_AND_ARGS_FRAME // save callee saves in case allocation triggers GC 514 // Helper signature is always 515 // (method_idx, *this_object, *caller_method, *self, sp) 516 517 mov x2, xSELF // pass Thread::Current 518 mov x3, sp 519 bl \cxx_name // (method_idx, this, Thread*, SP) 520 mov xIP0, x1 // save Method*->code_ 521 RESTORE_SAVE_REFS_AND_ARGS_FRAME 522 cbz x0, 1f // did we find the target? if not go to exception delivery 523 br xIP0 // tail call to target 5241: 525 DELIVER_PENDING_EXCEPTION 526.endm 527.macro INVOKE_TRAMPOLINE c_name, cxx_name 528ENTRY \c_name 529 INVOKE_TRAMPOLINE_BODY \cxx_name 530END \c_name 531.endm 532 533INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck 534 535INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck 536INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck 537INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck 538INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck 539 540 541.macro INVOKE_STUB_CREATE_FRAME 542 543SAVE_SIZE=15*8 // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved. 544SAVE_SIZE_AND_METHOD=SAVE_SIZE+8 545 546 547 mov x9, sp // Save stack pointer. 548 .cfi_register sp,x9 549 550 add x10, x2, # SAVE_SIZE_AND_METHOD // calculate size of frame. 551 sub x10, sp, x10 // Calculate SP position - saves + ArtMethod* + args 552 and x10, x10, # ~0xf // Enforce 16 byte stack alignment. 553 mov sp, x10 // Set new SP. 554 555 sub x10, x9, #SAVE_SIZE // Calculate new FP (later). Done here as we must move SP 556 .cfi_def_cfa_register x10 // before this. 557 .cfi_adjust_cfa_offset SAVE_SIZE 558 559 str x28, [x10, #112] 560 .cfi_rel_offset x28, 112 561 562 stp x26, x27, [x10, #96] 563 .cfi_rel_offset x26, 96 564 .cfi_rel_offset x27, 104 565 566 stp x24, x25, [x10, #80] 567 .cfi_rel_offset x24, 80 568 .cfi_rel_offset x25, 88 569 570 stp x22, x23, [x10, #64] 571 .cfi_rel_offset x22, 64 572 .cfi_rel_offset x23, 72 573 574 stp x20, x21, [x10, #48] 575 .cfi_rel_offset x20, 48 576 .cfi_rel_offset x21, 56 577 578 stp x9, x19, [x10, #32] // Save old stack pointer and x19. 579 .cfi_rel_offset sp, 32 580 .cfi_rel_offset x19, 40 581 582 stp x4, x5, [x10, #16] // Save result and shorty addresses. 583 .cfi_rel_offset x4, 16 584 .cfi_rel_offset x5, 24 585 586 stp xFP, xLR, [x10] // Store LR & FP. 587 .cfi_rel_offset x29, 0 588 .cfi_rel_offset x30, 8 589 590 mov xFP, x10 // Use xFP now, as it's callee-saved. 591 .cfi_def_cfa_register x29 592 mov xSELF, x3 // Move thread pointer into SELF register. 593 594 // Copy arguments into stack frame. 595 // Use simple copy routine for now. 596 // 4 bytes per slot. 597 // X1 - source address 598 // W2 - args length 599 // X9 - destination address. 600 // W10 - temporary 601 add x9, sp, #8 // Destination address is bottom of stack + null. 602 603 // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler 604 // does not have unique-id variables. 6051: 606 cmp w2, #0 607 beq 2f 608 sub w2, w2, #4 // Need 65536 bytes of range. 609 ldr w10, [x1, x2] 610 str w10, [x9, x2] 611 612 b 1b 613 6142: 615 // Store null into ArtMethod* at bottom of frame. 616 str xzr, [sp] 617.endm 618 619.macro INVOKE_STUB_CALL_AND_RETURN 620 621 // load method-> METHOD_QUICK_CODE_OFFSET 622 ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64] 623 // Branch to method. 624 blr x9 625 626 // Restore return value address and shorty address. 627 ldp x4,x5, [xFP, #16] 628 .cfi_restore x4 629 .cfi_restore x5 630 631 ldr x28, [xFP, #112] 632 .cfi_restore x28 633 634 ldp x26, x27, [xFP, #96] 635 .cfi_restore x26 636 .cfi_restore x27 637 638 ldp x24, x25, [xFP, #80] 639 .cfi_restore x24 640 .cfi_restore x25 641 642 ldp x22, x23, [xFP, #64] 643 .cfi_restore x22 644 .cfi_restore x23 645 646 ldp x20, x21, [xFP, #48] 647 .cfi_restore x20 648 .cfi_restore x21 649 650 // Store result (w0/x0/s0/d0) appropriately, depending on resultType. 651 ldrb w10, [x5] 652 653 // Check the return type and store the correct register into the jvalue in memory. 654 // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables. 655 656 // Don't set anything for a void type. 657 cmp w10, #'V' 658 beq 3f 659 660 // Is it a double? 661 cmp w10, #'D' 662 bne 1f 663 str d0, [x4] 664 b 3f 665 6661: // Is it a float? 667 cmp w10, #'F' 668 bne 2f 669 str s0, [x4] 670 b 3f 671 6722: // Just store x0. Doesn't matter if it is 64 or 32 bits. 673 str x0, [x4] 674 6753: // Finish up. 676 ldp x2, x19, [xFP, #32] // Restore stack pointer and x19. 677 .cfi_restore x19 678 mov sp, x2 679 .cfi_restore sp 680 681 ldp xFP, xLR, [xFP] // Restore old frame pointer and link register. 682 .cfi_restore x29 683 .cfi_restore x30 684 685 ret 686 687.endm 688 689 690/* 691 * extern"C" void art_quick_invoke_stub(ArtMethod *method, x0 692 * uint32_t *args, x1 693 * uint32_t argsize, w2 694 * Thread *self, x3 695 * JValue *result, x4 696 * char *shorty); x5 697 * +----------------------+ 698 * | | 699 * | C/C++ frame | 700 * | LR'' | 701 * | FP'' | <- SP' 702 * +----------------------+ 703 * +----------------------+ 704 * | x28 | <- TODO: Remove callee-saves. 705 * | : | 706 * | x19 | 707 * | SP' | 708 * | X5 | 709 * | X4 | Saved registers 710 * | LR' | 711 * | FP' | <- FP 712 * +----------------------+ 713 * | uint32_t out[n-1] | 714 * | : : | Outs 715 * | uint32_t out[0] | 716 * | ArtMethod* | <- SP value=null 717 * +----------------------+ 718 * 719 * Outgoing registers: 720 * x0 - Method* 721 * x1-x7 - integer parameters. 722 * d0-d7 - Floating point parameters. 723 * xSELF = self 724 * SP = & of ArtMethod* 725 * x1 = "this" pointer. 726 * 727 */ 728ENTRY art_quick_invoke_stub 729 // Spill registers as per AACPS64 calling convention. 730 INVOKE_STUB_CREATE_FRAME 731 732 // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters. 733 // Parse the passed shorty to determine which register to load. 734 // Load addresses for routines that load WXSD registers. 735 adr x11, .LstoreW2 736 adr x12, .LstoreX2 737 adr x13, .LstoreS0 738 adr x14, .LstoreD0 739 740 // Initialize routine offsets to 0 for integers and floats. 741 // x8 for integers, x15 for floating point. 742 mov x8, #0 743 mov x15, #0 744 745 add x10, x5, #1 // Load shorty address, plus one to skip return value. 746 ldr w1, [x9],#4 // Load "this" parameter, and increment arg pointer. 747 748 // Loop to fill registers. 749.LfillRegisters: 750 ldrb w17, [x10], #1 // Load next character in signature, and increment. 751 cbz w17, .LcallFunction // Exit at end of signature. Shorty 0 terminated. 752 753 cmp w17, #'F' // is this a float? 754 bne .LisDouble 755 756 cmp x15, # 8*12 // Skip this load if all registers full. 757 beq .Ladvance4 758 759 add x17, x13, x15 // Calculate subroutine to jump to. 760 br x17 761 762.LisDouble: 763 cmp w17, #'D' // is this a double? 764 bne .LisLong 765 766 cmp x15, # 8*12 // Skip this load if all registers full. 767 beq .Ladvance8 768 769 add x17, x14, x15 // Calculate subroutine to jump to. 770 br x17 771 772.LisLong: 773 cmp w17, #'J' // is this a long? 774 bne .LisOther 775 776 cmp x8, # 6*12 // Skip this load if all registers full. 777 beq .Ladvance8 778 779 add x17, x12, x8 // Calculate subroutine to jump to. 780 br x17 781 782.LisOther: // Everything else takes one vReg. 783 cmp x8, # 6*12 // Skip this load if all registers full. 784 beq .Ladvance4 785 786 add x17, x11, x8 // Calculate subroutine to jump to. 787 br x17 788 789.Ladvance4: 790 add x9, x9, #4 791 b .LfillRegisters 792 793.Ladvance8: 794 add x9, x9, #8 795 b .LfillRegisters 796 797// Macro for loading a parameter into a register. 798// counter - the register with offset into these tables 799// size - the size of the register - 4 or 8 bytes. 800// register - the name of the register to be loaded. 801.macro LOADREG counter size register return 802 ldr \register , [x9], #\size 803 add \counter, \counter, 12 804 b \return 805.endm 806 807// Store ints. 808.LstoreW2: 809 LOADREG x8 4 w2 .LfillRegisters 810 LOADREG x8 4 w3 .LfillRegisters 811 LOADREG x8 4 w4 .LfillRegisters 812 LOADREG x8 4 w5 .LfillRegisters 813 LOADREG x8 4 w6 .LfillRegisters 814 LOADREG x8 4 w7 .LfillRegisters 815 816// Store longs. 817.LstoreX2: 818 LOADREG x8 8 x2 .LfillRegisters 819 LOADREG x8 8 x3 .LfillRegisters 820 LOADREG x8 8 x4 .LfillRegisters 821 LOADREG x8 8 x5 .LfillRegisters 822 LOADREG x8 8 x6 .LfillRegisters 823 LOADREG x8 8 x7 .LfillRegisters 824 825// Store singles. 826.LstoreS0: 827 LOADREG x15 4 s0 .LfillRegisters 828 LOADREG x15 4 s1 .LfillRegisters 829 LOADREG x15 4 s2 .LfillRegisters 830 LOADREG x15 4 s3 .LfillRegisters 831 LOADREG x15 4 s4 .LfillRegisters 832 LOADREG x15 4 s5 .LfillRegisters 833 LOADREG x15 4 s6 .LfillRegisters 834 LOADREG x15 4 s7 .LfillRegisters 835 836// Store doubles. 837.LstoreD0: 838 LOADREG x15 8 d0 .LfillRegisters 839 LOADREG x15 8 d1 .LfillRegisters 840 LOADREG x15 8 d2 .LfillRegisters 841 LOADREG x15 8 d3 .LfillRegisters 842 LOADREG x15 8 d4 .LfillRegisters 843 LOADREG x15 8 d5 .LfillRegisters 844 LOADREG x15 8 d6 .LfillRegisters 845 LOADREG x15 8 d7 .LfillRegisters 846 847 848.LcallFunction: 849 850 INVOKE_STUB_CALL_AND_RETURN 851 852END art_quick_invoke_stub 853 854/* extern"C" 855 * void art_quick_invoke_static_stub(ArtMethod *method, x0 856 * uint32_t *args, x1 857 * uint32_t argsize, w2 858 * Thread *self, x3 859 * JValue *result, x4 860 * char *shorty); x5 861 */ 862ENTRY art_quick_invoke_static_stub 863 // Spill registers as per AACPS64 calling convention. 864 INVOKE_STUB_CREATE_FRAME 865 866 // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters. 867 // Parse the passed shorty to determine which register to load. 868 // Load addresses for routines that load WXSD registers. 869 adr x11, .LstoreW1_2 870 adr x12, .LstoreX1_2 871 adr x13, .LstoreS0_2 872 adr x14, .LstoreD0_2 873 874 // Initialize routine offsets to 0 for integers and floats. 875 // x8 for integers, x15 for floating point. 876 mov x8, #0 877 mov x15, #0 878 879 add x10, x5, #1 // Load shorty address, plus one to skip return value. 880 881 // Loop to fill registers. 882.LfillRegisters2: 883 ldrb w17, [x10], #1 // Load next character in signature, and increment. 884 cbz w17, .LcallFunction2 // Exit at end of signature. Shorty 0 terminated. 885 886 cmp w17, #'F' // is this a float? 887 bne .LisDouble2 888 889 cmp x15, # 8*12 // Skip this load if all registers full. 890 beq .Ladvance4_2 891 892 add x17, x13, x15 // Calculate subroutine to jump to. 893 br x17 894 895.LisDouble2: 896 cmp w17, #'D' // is this a double? 897 bne .LisLong2 898 899 cmp x15, # 8*12 // Skip this load if all registers full. 900 beq .Ladvance8_2 901 902 add x17, x14, x15 // Calculate subroutine to jump to. 903 br x17 904 905.LisLong2: 906 cmp w17, #'J' // is this a long? 907 bne .LisOther2 908 909 cmp x8, # 7*12 // Skip this load if all registers full. 910 beq .Ladvance8_2 911 912 add x17, x12, x8 // Calculate subroutine to jump to. 913 br x17 914 915.LisOther2: // Everything else takes one vReg. 916 cmp x8, # 7*12 // Skip this load if all registers full. 917 beq .Ladvance4_2 918 919 add x17, x11, x8 // Calculate subroutine to jump to. 920 br x17 921 922.Ladvance4_2: 923 add x9, x9, #4 924 b .LfillRegisters2 925 926.Ladvance8_2: 927 add x9, x9, #8 928 b .LfillRegisters2 929 930// Store ints. 931.LstoreW1_2: 932 LOADREG x8 4 w1 .LfillRegisters2 933 LOADREG x8 4 w2 .LfillRegisters2 934 LOADREG x8 4 w3 .LfillRegisters2 935 LOADREG x8 4 w4 .LfillRegisters2 936 LOADREG x8 4 w5 .LfillRegisters2 937 LOADREG x8 4 w6 .LfillRegisters2 938 LOADREG x8 4 w7 .LfillRegisters2 939 940// Store longs. 941.LstoreX1_2: 942 LOADREG x8 8 x1 .LfillRegisters2 943 LOADREG x8 8 x2 .LfillRegisters2 944 LOADREG x8 8 x3 .LfillRegisters2 945 LOADREG x8 8 x4 .LfillRegisters2 946 LOADREG x8 8 x5 .LfillRegisters2 947 LOADREG x8 8 x6 .LfillRegisters2 948 LOADREG x8 8 x7 .LfillRegisters2 949 950// Store singles. 951.LstoreS0_2: 952 LOADREG x15 4 s0 .LfillRegisters2 953 LOADREG x15 4 s1 .LfillRegisters2 954 LOADREG x15 4 s2 .LfillRegisters2 955 LOADREG x15 4 s3 .LfillRegisters2 956 LOADREG x15 4 s4 .LfillRegisters2 957 LOADREG x15 4 s5 .LfillRegisters2 958 LOADREG x15 4 s6 .LfillRegisters2 959 LOADREG x15 4 s7 .LfillRegisters2 960 961// Store doubles. 962.LstoreD0_2: 963 LOADREG x15 8 d0 .LfillRegisters2 964 LOADREG x15 8 d1 .LfillRegisters2 965 LOADREG x15 8 d2 .LfillRegisters2 966 LOADREG x15 8 d3 .LfillRegisters2 967 LOADREG x15 8 d4 .LfillRegisters2 968 LOADREG x15 8 d5 .LfillRegisters2 969 LOADREG x15 8 d6 .LfillRegisters2 970 LOADREG x15 8 d7 .LfillRegisters2 971 972 973.LcallFunction2: 974 975 INVOKE_STUB_CALL_AND_RETURN 976 977END art_quick_invoke_static_stub 978 979 980 981/* extern"C" void art_quick_osr_stub(void** stack, x0 982 * size_t stack_size_in_bytes, x1 983 * const uin8_t* native_pc, x2 984 * JValue *result, x3 985 * char *shorty, x4 986 * Thread *self) x5 987 */ 988ENTRY art_quick_osr_stub 989SAVE_SIZE=15*8 // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved. 990 mov x9, sp // Save stack pointer. 991 .cfi_register sp,x9 992 993 sub x10, sp, # SAVE_SIZE 994 and x10, x10, # ~0xf // Enforce 16 byte stack alignment. 995 mov sp, x10 // Set new SP. 996 997 str x28, [sp, #112] 998 stp x26, x27, [sp, #96] 999 stp x24, x25, [sp, #80] 1000 stp x22, x23, [sp, #64] 1001 stp x20, x21, [sp, #48] 1002 stp x9, x19, [sp, #32] // Save old stack pointer and x19. 1003 stp x3, x4, [sp, #16] // Save result and shorty addresses. 1004 stp xFP, xLR, [sp] // Store LR & FP. 1005 mov xSELF, x5 // Move thread pointer into SELF register. 1006 1007 sub sp, sp, #16 1008 str xzr, [sp] // Store null for ArtMethod* slot 1009 // Branch to stub. 1010 bl .Losr_entry 1011 add sp, sp, #16 1012 1013 // Restore return value address and shorty address. 1014 ldp x3,x4, [sp, #16] 1015 ldr x28, [sp, #112] 1016 ldp x26, x27, [sp, #96] 1017 ldp x24, x25, [sp, #80] 1018 ldp x22, x23, [sp, #64] 1019 ldp x20, x21, [sp, #48] 1020 1021 // Store result (w0/x0/s0/d0) appropriately, depending on resultType. 1022 ldrb w10, [x4] 1023 1024 // Check the return type and store the correct register into the jvalue in memory. 1025 1026 // Don't set anything for a void type. 1027 cmp w10, #'V' 1028 beq .Losr_exit 1029 1030 // Is it a double? 1031 cmp w10, #'D' 1032 bne .Lno_double 1033 str d0, [x3] 1034 b .Losr_exit 1035 1036.Lno_double: // Is it a float? 1037 cmp w10, #'F' 1038 bne .Lno_float 1039 str s0, [x3] 1040 b .Losr_exit 1041 1042.Lno_float: // Just store x0. Doesn't matter if it is 64 or 32 bits. 1043 str x0, [x3] 1044 1045.Losr_exit: // Finish up. 1046 ldp x2, x19, [sp, #32] // Restore stack pointer and x19. 1047 ldp xFP, xLR, [sp] // Restore old frame pointer and link register. 1048 mov sp, x2 1049 ret 1050 1051.Losr_entry: 1052 // Update stack pointer for the callee 1053 sub sp, sp, x1 1054 1055 // Update link register slot expected by the callee. 1056 sub w1, w1, #8 1057 str lr, [sp, x1] 1058 1059 // Copy arguments into stack frame. 1060 // Use simple copy routine for now. 1061 // 4 bytes per slot. 1062 // X0 - source address 1063 // W1 - args length 1064 // SP - destination address. 1065 // W10 - temporary 1066.Losr_loop_entry: 1067 cmp w1, #0 1068 beq .Losr_loop_exit 1069 sub w1, w1, #4 1070 ldr w10, [x0, x1] 1071 str w10, [sp, x1] 1072 b .Losr_loop_entry 1073 1074.Losr_loop_exit: 1075 // Branch to the OSR entry point. 1076 br x2 1077 1078END art_quick_osr_stub 1079 1080 /* 1081 * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_ 1082 */ 1083 1084ENTRY art_quick_do_long_jump 1085 // Load FPRs 1086 ldp d0, d1, [x1], #16 1087 ldp d2, d3, [x1], #16 1088 ldp d4, d5, [x1], #16 1089 ldp d6, d7, [x1], #16 1090 ldp d8, d9, [x1], #16 1091 ldp d10, d11, [x1], #16 1092 ldp d12, d13, [x1], #16 1093 ldp d14, d15, [x1], #16 1094 ldp d16, d17, [x1], #16 1095 ldp d18, d19, [x1], #16 1096 ldp d20, d21, [x1], #16 1097 ldp d22, d23, [x1], #16 1098 ldp d24, d25, [x1], #16 1099 ldp d26, d27, [x1], #16 1100 ldp d28, d29, [x1], #16 1101 ldp d30, d31, [x1] 1102 1103 // Load GPRs 1104 // TODO: lots of those are smashed, could optimize. 1105 add x0, x0, #30*8 1106 ldp x30, x1, [x0], #-16 // LR & SP 1107 ldp x28, x29, [x0], #-16 1108 ldp x26, x27, [x0], #-16 1109 ldp x24, x25, [x0], #-16 1110 ldp x22, x23, [x0], #-16 1111 ldp x20, x21, [x0], #-16 1112 ldp x18, x19, [x0], #-16 1113 ldp x16, x17, [x0], #-16 1114 ldp x14, x15, [x0], #-16 1115 ldp x12, x13, [x0], #-16 1116 ldp x10, x11, [x0], #-16 1117 ldp x8, x9, [x0], #-16 1118 ldp x6, x7, [x0], #-16 1119 ldp x4, x5, [x0], #-16 1120 ldp x2, x3, [x0], #-16 1121 mov sp, x1 1122 1123 // Need to load PC, it's at the end (after the space for the unused XZR). Use x1. 1124 ldr x1, [x0, #33*8] 1125 // And the value of x0. 1126 ldr x0, [x0] 1127 1128 br x1 1129END art_quick_do_long_jump 1130 1131 /* 1132 * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the 1133 * possibly null object to lock. 1134 * 1135 * Derived from arm32 code. 1136 */ 1137 .extern artLockObjectFromCode 1138ENTRY art_quick_lock_object 1139 cbz w0, .Lslow_lock 1140 add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore 1141.Lretry_lock: 1142 ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop? 1143 ldxr w1, [x4] 1144 mov x3, x1 1145 and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits 1146 cbnz w3, .Lnot_unlocked // already thin locked 1147 // unlocked case - x1: original lock word that's zero except for the read barrier bits. 1148 orr x2, x1, x2 // x2 holds thread id with count of 0 with preserved read barrier bits 1149 stxr w3, w2, [x4] 1150 cbnz w3, .Llock_stxr_fail // store failed, retry 1151 dmb ishld // full (LoadLoad|LoadStore) memory barrier 1152 ret 1153.Lnot_unlocked: // x1: original lock word 1154 lsr w3, w1, LOCK_WORD_STATE_SHIFT 1155 cbnz w3, .Lslow_lock // if either of the top two bits are set, go slow path 1156 eor w2, w1, w2 // lock_word.ThreadId() ^ self->ThreadId() 1157 uxth w2, w2 // zero top 16 bits 1158 cbnz w2, .Lslow_lock // lock word and self thread id's match -> recursive lock 1159 // else contention, go to slow path 1160 mov x3, x1 // copy the lock word to check count overflow. 1161 and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits. 1162 add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count in lock word placing in w2 to check overflow 1163 lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT // if the first gc state bit is set, we overflowed. 1164 cbnz w3, .Lslow_lock // if we overflow the count go slow path 1165 add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count for real 1166 stxr w3, w2, [x4] 1167 cbnz w3, .Llock_stxr_fail // store failed, retry 1168 ret 1169.Llock_stxr_fail: 1170 b .Lretry_lock // retry 1171.Lslow_lock: 1172 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block 1173 mov x1, xSELF // pass Thread::Current 1174 bl artLockObjectFromCode // (Object* obj, Thread*) 1175 RESTORE_SAVE_REFS_ONLY_FRAME 1176 RETURN_IF_W0_IS_ZERO_OR_DELIVER 1177END art_quick_lock_object 1178 1179ENTRY art_quick_lock_object_no_inline 1180 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block 1181 mov x1, xSELF // pass Thread::Current 1182 bl artLockObjectFromCode // (Object* obj, Thread*) 1183 RESTORE_SAVE_REFS_ONLY_FRAME 1184 RETURN_IF_W0_IS_ZERO_OR_DELIVER 1185END art_quick_lock_object_no_inline 1186 1187 /* 1188 * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. 1189 * x0 holds the possibly null object to lock. 1190 * 1191 * Derived from arm32 code. 1192 */ 1193 .extern artUnlockObjectFromCode 1194ENTRY art_quick_unlock_object 1195 cbz x0, .Lslow_unlock 1196 add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore 1197.Lretry_unlock: 1198#ifndef USE_READ_BARRIER 1199 ldr w1, [x4] 1200#else 1201 ldxr w1, [x4] // Need to use atomic instructions for read barrier 1202#endif 1203 lsr w2, w1, LOCK_WORD_STATE_SHIFT 1204 cbnz w2, .Lslow_unlock // if either of the top two bits are set, go slow path 1205 ldr w2, [xSELF, #THREAD_ID_OFFSET] 1206 mov x3, x1 // copy lock word to check thread id equality 1207 and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits 1208 eor w3, w3, w2 // lock_word.ThreadId() ^ self->ThreadId() 1209 uxth w3, w3 // zero top 16 bits 1210 cbnz w3, .Lslow_unlock // do lock word and self thread id's match? 1211 mov x3, x1 // copy lock word to detect transition to unlocked 1212 and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits 1213 cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE 1214 bpl .Lrecursive_thin_unlock 1215 // transition to unlocked 1216 mov x3, x1 1217 and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED // w3: zero except for the preserved read barrier bits 1218 dmb ish // full (LoadStore|StoreStore) memory barrier 1219#ifndef USE_READ_BARRIER 1220 str w3, [x4] 1221#else 1222 stxr w2, w3, [x4] // Need to use atomic instructions for read barrier 1223 cbnz w2, .Lunlock_stxr_fail // store failed, retry 1224#endif 1225 ret 1226.Lrecursive_thin_unlock: // w1: original lock word 1227 sub w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // decrement count 1228#ifndef USE_READ_BARRIER 1229 str w1, [x4] 1230#else 1231 stxr w2, w1, [x4] // Need to use atomic instructions for read barrier 1232 cbnz w2, .Lunlock_stxr_fail // store failed, retry 1233#endif 1234 ret 1235.Lunlock_stxr_fail: 1236 b .Lretry_unlock // retry 1237.Lslow_unlock: 1238 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC 1239 mov x1, xSELF // pass Thread::Current 1240 bl artUnlockObjectFromCode // (Object* obj, Thread*) 1241 RESTORE_SAVE_REFS_ONLY_FRAME 1242 RETURN_IF_W0_IS_ZERO_OR_DELIVER 1243END art_quick_unlock_object 1244 1245ENTRY art_quick_unlock_object_no_inline 1246 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC 1247 mov x1, xSELF // pass Thread::Current 1248 bl artUnlockObjectFromCode // (Object* obj, Thread*) 1249 RESTORE_SAVE_REFS_ONLY_FRAME 1250 RETURN_IF_W0_IS_ZERO_OR_DELIVER 1251END art_quick_unlock_object_no_inline 1252 1253 /* 1254 * Entry from managed code that calls artIsAssignableFromCode and on failure calls 1255 * artThrowClassCastException. 1256 */ 1257 .extern artThrowClassCastException 1258ENTRY art_quick_check_cast 1259 // Store arguments and link register 1260 // Stack needs to be 16B aligned on calls. 1261 SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 1262 SAVE_REG xLR, 24 1263 1264 // Call runtime code 1265 bl artIsAssignableFromCode 1266 1267 // Check for exception 1268 cbz x0, .Lthrow_class_cast_exception 1269 1270 // Restore and return 1271 RESTORE_REG xLR, 24 1272 RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 1273 ret 1274 1275 .cfi_adjust_cfa_offset 32 // Reset unwind info so following code unwinds. 1276 1277.Lthrow_class_cast_exception: 1278 // Restore 1279 RESTORE_REG xLR, 24 1280 RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 1281 1282 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context 1283 mov x2, xSELF // pass Thread::Current 1284 b artThrowClassCastException // (Class*, Class*, Thread*) 1285 brk 0 // We should not return here... 1286END art_quick_check_cast 1287 1288// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude. 1289.macro POP_REG_NE xReg, offset, xExclude 1290 .ifnc \xReg, \xExclude 1291 ldr \xReg, [sp, #\offset] // restore xReg 1292 .cfi_restore \xReg 1293 .endif 1294.endm 1295 1296// Restore xReg1's value from [sp, #offset] if xReg1 is not the same as xExclude. 1297// Restore xReg2's value from [sp, #(offset + 8)] if xReg2 is not the same as xExclude. 1298.macro POP_REGS_NE xReg1, xReg2, offset, xExclude 1299 .ifc \xReg1, \xExclude 1300 ldr \xReg2, [sp, #(\offset + 8)] // restore xReg2 1301 .else 1302 .ifc \xReg2, \xExclude 1303 ldr \xReg1, [sp, #\offset] // restore xReg1 1304 .else 1305 ldp \xReg1, \xReg2, [sp, #\offset] // restore xReg1 and xReg2 1306 .endif 1307 .endif 1308 .cfi_restore \xReg1 1309 .cfi_restore \xReg2 1310.endm 1311 1312 /* 1313 * Macro to insert read barrier, only used in art_quick_aput_obj. 1314 * xDest, wDest and xObj are registers, offset is a defined literal such as 1315 * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle 1316 * name mismatch between instructions. This macro uses the lower 32b of register when possible. 1317 * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. 1318 */ 1319.macro READ_BARRIER xDest, wDest, xObj, xTemp, wTemp, offset, number 1320#ifdef USE_READ_BARRIER 1321#ifdef USE_BAKER_READ_BARRIER 1322 ldr \wTemp, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 1323 tbnz \wTemp, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, .Lrb_slowpath\number 1324 // False dependency to avoid needing load/load fence. 1325 add \xObj, \xObj, \xTemp, lsr #32 1326 ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest. 1327 UNPOISON_HEAP_REF \wDest 1328 b .Lrb_exit\number 1329#endif 1330.Lrb_slowpath\number: 1331 // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned. 1332 SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 48 1333 SAVE_TWO_REGS x2, x3, 16 1334 SAVE_TWO_REGS x4, xLR, 32 1335 1336 // mov x0, \xRef // pass ref in x0 (no-op for now since parameter ref is unused) 1337 .ifnc \xObj, x1 1338 mov x1, \xObj // pass xObj 1339 .endif 1340 mov w2, #\offset // pass offset 1341 bl artReadBarrierSlow // artReadBarrierSlow(ref, xObj, offset) 1342 // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning. 1343 .ifnc \wDest, w0 1344 mov \wDest, w0 // save return value in wDest 1345 .endif 1346 1347 // Conditionally restore saved registers 1348 POP_REG_NE x0, 0, \xDest 1349 POP_REG_NE x1, 8, \xDest 1350 POP_REG_NE x2, 16, \xDest 1351 POP_REG_NE x3, 24, \xDest 1352 POP_REG_NE x4, 32, \xDest 1353 RESTORE_REG xLR, 40 1354 add sp, sp, #48 1355 .cfi_adjust_cfa_offset -48 1356.Lrb_exit\number: 1357#else 1358 ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest. 1359 UNPOISON_HEAP_REF \wDest 1360#endif // USE_READ_BARRIER 1361.endm 1362 1363 /* 1364 * Entry from managed code for array put operations of objects where the value being stored 1365 * needs to be checked for compatibility. 1366 * x0 = array, x1 = index, x2 = value 1367 * 1368 * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We 1369 * assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by 1370 * using index-zero-extension in load/stores. 1371 * 1372 * Temporaries: x3, x4 1373 * TODO: x4 OK? ip seems wrong here. 1374 */ 1375ENTRY art_quick_aput_obj_with_null_and_bound_check 1376 tst x0, x0 1377 bne art_quick_aput_obj_with_bound_check 1378 b art_quick_throw_null_pointer_exception 1379END art_quick_aput_obj_with_null_and_bound_check 1380 1381ENTRY art_quick_aput_obj_with_bound_check 1382 ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] 1383 cmp w3, w1 1384 bhi art_quick_aput_obj 1385 mov x0, x1 1386 mov x1, x3 1387 b art_quick_throw_array_bounds 1388END art_quick_aput_obj_with_bound_check 1389 1390#ifdef USE_READ_BARRIER 1391 .extern artReadBarrierSlow 1392#endif 1393ENTRY art_quick_aput_obj 1394 cbz x2, .Ldo_aput_null 1395 READ_BARRIER x3, w3, x0, x3, w3, MIRROR_OBJECT_CLASS_OFFSET, 0 // Heap reference = 32b 1396 // This also zero-extends to x3 1397 READ_BARRIER x3, w3, x3, x4, w4, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 1 // Heap reference = 32b 1398 // This also zero-extends to x3 1399 READ_BARRIER x4, w4, x2, x4, w4, MIRROR_OBJECT_CLASS_OFFSET, 2 // Heap reference = 32b 1400 // This also zero-extends to x4 1401 cmp w3, w4 // value's type == array's component type - trivial assignability 1402 bne .Lcheck_assignability 1403.Ldo_aput: 1404 add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET 1405 // "Compress" = do nothing 1406 POISON_HEAP_REF w2 1407 str w2, [x3, x1, lsl #2] // Heap reference = 32b 1408 ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET] 1409 lsr x0, x0, #7 1410 strb w3, [x3, x0] 1411 ret 1412.Ldo_aput_null: 1413 add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET 1414 // "Compress" = do nothing 1415 str w2, [x3, x1, lsl #2] // Heap reference = 32b 1416 ret 1417.Lcheck_assignability: 1418 // Store arguments and link register 1419 SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 1420 SAVE_TWO_REGS x2, xLR, 16 1421 1422 // Call runtime code 1423 mov x0, x3 // Heap reference, 32b, "uncompress" = do nothing, already zero-extended 1424 mov x1, x4 // Heap reference, 32b, "uncompress" = do nothing, already zero-extended 1425 bl artIsAssignableFromCode 1426 1427 // Check for exception 1428 cbz x0, .Lthrow_array_store_exception 1429 1430 // Restore 1431 RESTORE_TWO_REGS x2, xLR, 16 1432 RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 1433 1434 add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET 1435 // "Compress" = do nothing 1436 POISON_HEAP_REF w2 1437 str w2, [x3, x1, lsl #2] // Heap reference = 32b 1438 ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET] 1439 lsr x0, x0, #7 1440 strb w3, [x3, x0] 1441 ret 1442 .cfi_adjust_cfa_offset 32 // 4 restores after cbz for unwinding. 1443.Lthrow_array_store_exception: 1444 RESTORE_TWO_REGS x2, xLR, 16 1445 RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 1446 1447 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME 1448 mov x1, x2 // Pass value. 1449 mov x2, xSELF // Pass Thread::Current. 1450 b artThrowArrayStoreException // (Object*, Object*, Thread*). 1451 brk 0 // Unreached. 1452END art_quick_aput_obj 1453 1454// Macro to facilitate adding new allocation entrypoints. 1455.macro ONE_ARG_DOWNCALL name, entrypoint, return 1456 .extern \entrypoint 1457ENTRY \name 1458 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1459 mov x1, xSELF // pass Thread::Current 1460 bl \entrypoint // (uint32_t type_idx, Method* method, Thread*) 1461 RESTORE_SAVE_REFS_ONLY_FRAME 1462 \return 1463END \name 1464.endm 1465 1466// Macro to facilitate adding new allocation entrypoints. 1467.macro TWO_ARG_DOWNCALL name, entrypoint, return 1468 .extern \entrypoint 1469ENTRY \name 1470 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1471 mov x2, xSELF // pass Thread::Current 1472 bl \entrypoint // (uint32_t type_idx, Method* method, Thread*) 1473 RESTORE_SAVE_REFS_ONLY_FRAME 1474 \return 1475END \name 1476.endm 1477 1478// Macro to facilitate adding new allocation entrypoints. 1479.macro THREE_ARG_DOWNCALL name, entrypoint, return 1480 .extern \entrypoint 1481ENTRY \name 1482 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1483 mov x3, xSELF // pass Thread::Current 1484 bl \entrypoint 1485 RESTORE_SAVE_REFS_ONLY_FRAME 1486 \return 1487END \name 1488.endm 1489 1490// Macro to facilitate adding new allocation entrypoints. 1491.macro FOUR_ARG_DOWNCALL name, entrypoint, return 1492 .extern \entrypoint 1493ENTRY \name 1494 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1495 mov x4, xSELF // pass Thread::Current 1496 bl \entrypoint // 1497 RESTORE_SAVE_REFS_ONLY_FRAME 1498 \return 1499 DELIVER_PENDING_EXCEPTION 1500END \name 1501.endm 1502 1503// Macros taking opportunity of code similarities for downcalls with referrer. 1504.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return 1505 .extern \entrypoint 1506ENTRY \name 1507 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1508 ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer 1509 mov x2, xSELF // pass Thread::Current 1510 bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP) 1511 RESTORE_SAVE_REFS_ONLY_FRAME 1512 \return 1513END \name 1514.endm 1515 1516.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return 1517 .extern \entrypoint 1518ENTRY \name 1519 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1520 ldr x2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer 1521 mov x3, xSELF // pass Thread::Current 1522 bl \entrypoint 1523 RESTORE_SAVE_REFS_ONLY_FRAME 1524 \return 1525END \name 1526.endm 1527 1528.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return 1529 .extern \entrypoint 1530ENTRY \name 1531 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1532 ldr x3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer 1533 mov x4, xSELF // pass Thread::Current 1534 bl \entrypoint 1535 RESTORE_SAVE_REFS_ONLY_FRAME 1536 \return 1537END \name 1538.endm 1539 1540.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1541 cbz w0, 1f // result zero branch over 1542 ret // return 15431: 1544 DELIVER_PENDING_EXCEPTION 1545.endm 1546 1547 /* 1548 * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on 1549 * failure. 1550 */ 1551TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1552 1553 /* 1554 * Entry from managed code when uninitialized static storage, this stub will run the class 1555 * initializer and deliver the exception on error. On success the static storage base is 1556 * returned. 1557 */ 1558ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1559 1560ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1561ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1562 1563ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1564ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1565ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1566ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1567ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1568ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1569ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1570 1571TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1572TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1573TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1574TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1575TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1576TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1577TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 1578 1579TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1580TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1581TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1582TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1583 1584THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1585THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1586THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1587THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1588THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER 1589 1590// This is separated out as the argument order is different. 1591 .extern artSet64StaticFromCode 1592ENTRY art_quick_set64_static 1593 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1594 ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer 1595 // x2 contains the parameter 1596 mov x3, xSELF // pass Thread::Current 1597 bl artSet64StaticFromCode 1598 RESTORE_SAVE_REFS_ONLY_FRAME 1599 RETURN_IF_W0_IS_ZERO_OR_DELIVER 1600END art_quick_set64_static 1601 1602 /* 1603 * Entry from managed code to resolve a string, this stub will 1604 * check the dex cache for a matching string (the fast path), and if not found, 1605 * it will allocate a String and deliver an exception on error. 1606 * On success the String is returned. R0 holds the string index. 1607 */ 1608 1609ENTRY art_quick_resolve_string 1610 ldr x1, [sp] // load referrer 1611 ldr w2, [x1, #ART_METHOD_DECLARING_CLASS_OFFSET] // load declaring class 1612 ldr x1, [x2, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] // load string dex cache 1613 ubfx x2, x0, #0, #STRING_DEX_CACHE_HASH_BITS // get masked string index into x2 1614 ldr x2, [x1, x2, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT] // load dex cache pair into x2 1615 cmp x0, x2, lsr #32 // compare against upper 32 bits 1616 bne .Lart_quick_resolve_string_slow_path 1617 ubfx x0, x2, #0, #32 // extract lower 32 bits into x0 1618#ifdef USE_READ_BARRIER 1619 // Most common case: GC is not marking. 1620 ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET] 1621 cbnz x3, .Lart_quick_resolve_string_marking 1622#endif 1623 ret 1624 1625// Slow path case, the index did not match. 1626.Lart_quick_resolve_string_slow_path: 1627 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1628 mov x1, xSELF // pass Thread::Current 1629 bl artResolveStringFromCode // (int32_t string_idx, Thread* self) 1630 RESTORE_SAVE_REFS_ONLY_FRAME 1631 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1632 1633// GC is marking case, need to check the mark bit. 1634.Lart_quick_resolve_string_marking: 1635 ldr x3, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 1636 tbnz x3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb 1637 // Save LR so that we can return, also x1 for alignment purposes. 1638 SAVE_TWO_REGS_INCREASE_FRAME x1, xLR, 16 // Save x1, LR. 1639 bl artReadBarrierMark // Get the marked string back. 1640 RESTORE_TWO_REGS_DECREASE_FRAME x1, xLR, 16 // Restore registers. 1641.Lart_quick_resolve_string_no_rb: 1642 ret 1643 1644END art_quick_resolve_string 1645 1646// Generate the allocation entrypoints for each allocator. 1647GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS 1648// Comment out allocators that have arm64 specific asm. 1649// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm 1650// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) 1651// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB) 1652GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) 1653// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB) implemented in asm 1654// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB) 1655GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) 1656GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB) 1657GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) 1658GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB) 1659GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB) 1660GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB) 1661 1662// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). 1663ENTRY art_quick_alloc_object_rosalloc 1664 // Fast path rosalloc allocation. 1665 // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current 1666 // x2-x7: free. 1667 ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 1668 // Load the class (x2) 1669 ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 1670 cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class 1671 ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local 1672 // allocation stack has room. 1673 // ldp won't work due to large offset. 1674 ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET] 1675 cmp x3, x4 1676 bhs .Lart_quick_alloc_object_rosalloc_slow_path 1677 ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3) 1678 cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread 1679 // local allocation. Also does the 1680 // finalizable and initialization 1681 // checks. 1682 bhs .Lart_quick_alloc_object_rosalloc_slow_path 1683 // Compute the rosalloc bracket index 1684 // from the size. Since the size is 1685 // already aligned we can combine the 1686 // two shifts together. 1687 add x4, xSELF, x3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT) 1688 // Subtract pointer size since ther 1689 // are no runs for 0 byte allocations 1690 // and the size is already aligned. 1691 ldr x4, [x4, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)] 1692 // Load the free list head (x3). This 1693 // will be the return val. 1694 ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] 1695 cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path 1696 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. 1697 ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head 1698 // and update the list head with the 1699 // next pointer. 1700 str x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] 1701 // Store the class pointer in the 1702 // header. This also overwrites the 1703 // next pointer. The offsets are 1704 // asserted to match. 1705#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET 1706#error "Class pointer needs to overwrite next pointer." 1707#endif 1708 POISON_HEAP_REF w2 1709 str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET] 1710 // Fence. This is "ish" not "ishst" so 1711 // that it also ensures ordering of 1712 // the object size load with respect 1713 // to later accesses to the class 1714 // object. Alternatively we could use 1715 // "ishst" if we use load-acquire for 1716 // the class status load. 1717 // Needs to be done before pushing on 1718 // allocation since Heap::VisitObjects 1719 // relies on seeing the class pointer. 1720 // b/28790624 1721 dmb ish 1722 // Push the new object onto the thread 1723 // local allocation stack and 1724 // increment the thread local 1725 // allocation stack top. 1726 ldr x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] 1727 str w3, [x1], #COMPRESSED_REFERENCE_SIZE // (Increment x1 as a side effect.) 1728 str x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] 1729 // Decrement the size of the free list 1730 ldr w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)] 1731 sub x1, x1, #1 1732 // TODO: consider combining this store 1733 // and the list head store above using 1734 // strd. 1735 str w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)] 1736 1737 mov x0, x3 // Set the return value and return. 1738 ret 1739.Lart_quick_alloc_object_rosalloc_slow_path: 1740 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1741 mov x2, xSELF // pass Thread::Current 1742 bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*) 1743 RESTORE_SAVE_REFS_ONLY_FRAME 1744 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1745END art_quick_alloc_object_rosalloc 1746 1747 1748// The common fast path code for art_quick_alloc_array_region_tlab. 1749.macro ALLOC_ARRAY_TLAB_FAST_PATH slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2 1750 // Check null class 1751 cbz \wClass, \slowPathLabel 1752 ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED \slowPathLabel, \xClass, \wClass, \xCount, \wCount, \xTemp0, \wTemp0, \xTemp1, \wTemp1, \xTemp2, \wTemp2 1753.endm 1754 1755// The common fast path code for art_quick_alloc_array_region_tlab. 1756.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2 1757 // Array classes are never finalizable or uninitialized, no need to check. 1758 ldr \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type 1759 UNPOISON_HEAP_REF \wTemp0 1760 ldr \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET] 1761 lsr \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16 1762 // bits. 1763 // xCount is holding a 32 bit value, 1764 // it can not overflow. 1765 lsl \xTemp1, \xCount, \xTemp0 // Calculate data size 1766 // Add array data offset and alignment. 1767 add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) 1768#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4 1769#error Long array data offset must be 4 greater than int array data offset. 1770#endif 1771 1772 add \xTemp0, \xTemp0, #1 // Add 4 to the length only if the 1773 // component size shift is 3 1774 // (for 64 bit alignment). 1775 and \xTemp0, \xTemp0, #4 1776 add \xTemp1, \xTemp1, \xTemp0 1777 and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignemnt mask 1778 // (addr + 7) & ~7. The mask must 1779 // be 64 bits to keep high bits in 1780 // case of overflow. 1781 // Negative sized arrays are handled here since xCount holds a zero extended 32 bit value. 1782 // Negative ints become large 64 bit unsigned ints which will always be larger than max signed 1783 // 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int. 1784 cmp \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD // Possibly a large object, go slow 1785 bhs \slowPathLabel // path. 1786 1787 ldr \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Check tlab for space, note that 1788 // we use (end - begin) to handle 1789 // negative size arrays. It is 1790 // assumed that a negative size will 1791 // always be greater unsigned than 1792 // region size. 1793 ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET] 1794 sub \xTemp2, \xTemp2, \xTemp0 1795 cmp \xTemp1, \xTemp2 1796 bhi \slowPathLabel 1797 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. 1798 // Move old thread_local_pos to x0 1799 // for the return value. 1800 mov x0, \xTemp0 1801 add \xTemp0, \xTemp0, \xTemp1 1802 str \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. 1803 ldr \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. 1804 add \xTemp0, \xTemp0, #1 1805 str \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] 1806 POISON_HEAP_REF \wClass 1807 str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. 1808 str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length. 1809 // Fence. 1810 dmb ishst 1811 ret 1812.endm 1813 1814// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. 1815// 1816// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current 1817// x3-x7: free. 1818// Need to preserve x0 and x1 to the slow path. 1819.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel 1820 cbz x2, \slowPathLabel // Check null class 1821 ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel 1822.endm 1823 1824// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as 1825// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED. 1826.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel 1827 ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel 1828.endm 1829 1830.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel 1831 ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET] 1832 ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET] 1833 ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7). 1834 add x6, x4, x7 // Add object size to tlab pos. 1835 cmp x6, x5 // Check if it fits, overflow works 1836 // since the tlab pos and end are 32 1837 // bit values. 1838 bhi \slowPathLabel 1839 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. 1840 mov x0, x4 1841 str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. 1842 ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. 1843 add x5, x5, #1 1844 str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] 1845 POISON_HEAP_REF w2 1846 str w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. 1847 // Fence. This is "ish" not "ishst" so 1848 // that the code after this allocation 1849 // site will see the right values in 1850 // the fields of the class. 1851 // Alternatively we could use "ishst" 1852 // if we use load-acquire for the 1853 // object size load.) 1854 dmb ish 1855 ret 1856.endm 1857 1858// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). 1859ENTRY art_quick_alloc_object_tlab 1860 // Fast path tlab allocation. 1861 // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current 1862 // x2-x7: free. 1863#if defined(USE_READ_BARRIER) 1864 mvn x0, xzr // Read barrier not supported here. 1865 ret // Return -1. 1866#endif 1867 ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 1868 // Load the class (x2) 1869 ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 1870 ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path 1871.Lart_quick_alloc_object_tlab_slow_path: 1872 SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC. 1873 mov x2, xSELF // Pass Thread::Current. 1874 bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*) 1875 RESTORE_SAVE_REFS_ONLY_FRAME 1876 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1877END art_quick_alloc_object_tlab 1878 1879// The common code for art_quick_alloc_object_*region_tlab 1880.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier 1881ENTRY \name 1882 // Fast path region tlab allocation. 1883 // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current 1884 // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index. 1885 // x2-x7: free. 1886#if !defined(USE_READ_BARRIER) 1887 mvn x0, xzr // Read barrier must be enabled here. 1888 ret // Return -1. 1889#endif 1890.if \is_resolved 1891 mov x2, x0 // class is actually stored in x0 already 1892.else 1893 ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 1894 // Load the class (x2) 1895 ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 1896 // If the class is null, go slow path. The check is required to read the lock word. 1897 cbz w2, .Lslow_path\name 1898.endif 1899.if \read_barrier 1900 // Most common case: GC is not marking. 1901 ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET] 1902 cbnz x3, .Lmarking\name 1903.endif 1904.Ldo_allocation\name: 1905 \fast_path .Lslow_path\name 1906.Lmarking\name: 1907.if \read_barrier 1908 // GC is marking, check the lock word of the class for the mark bit. 1909 // Class is not null, check mark bit in lock word. 1910 ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 1911 // If the bit is not zero, do the allocation. 1912 tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name 1913 // The read barrier slow path. Mark 1914 // the class. 1915 SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 // Save registers (x0, x1, lr). 1916 SAVE_REG xLR, 24 // Align sp by 16 bytes. 1917 mov x0, x2 // Pass the class as the first param. 1918 bl artReadBarrierMark 1919 mov x2, x0 // Get the (marked) class back. 1920 RESTORE_REG xLR, 24 1921 RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 // Restore registers. 1922 b .Ldo_allocation\name 1923.endif 1924.Lslow_path\name: 1925 SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC. 1926 mov x2, xSELF // Pass Thread::Current. 1927 bl \entrypoint // (uint32_t type_idx, Method* method, Thread*) 1928 RESTORE_SAVE_REFS_ONLY_FRAME 1929 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1930END \name 1931.endm 1932 1933// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_TLAB. 1934GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1 1935// No read barrier for the resolved or initialized cases since the caller is responsible for the 1936// read barrier due to the to-space invariant. 1937GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0 1938GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0 1939 1940// TODO: We could use this macro for the normal tlab allocator too. 1941 1942// The common code for art_quick_alloc_array_*region_tlab 1943.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved 1944ENTRY \name 1945 // Fast path array allocation for region tlab allocation. 1946 // x0: uint32_t type_idx 1947 // x1: int32_t component_count 1948 // x2: ArtMethod* method 1949 // x3-x7: free. 1950#if !defined(USE_READ_BARRIER) 1951 mvn x0, xzr // Read barrier must be enabled here. 1952 ret // Return -1. 1953#endif 1954.if \is_resolved 1955 mov x3, x0 1956 // If already resolved, class is stored in x0 1957.else 1958 ldr x3, [x2, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 1959 // Load the class (x2) 1960 ldr w3, [x3, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 1961.endif 1962 // Most common case: GC is not marking. 1963 ldr w4, [xSELF, #THREAD_IS_GC_MARKING_OFFSET] 1964 cbnz x4, .Lmarking\name 1965.Ldo_allocation\name: 1966 \fast_path .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6 1967.Lmarking\name: 1968 // GC is marking, check the lock word of the class for the mark bit. 1969 // If the class is null, go slow path. The check is required to read the lock word. 1970 cbz w3, .Lslow_path\name 1971 // Class is not null, check mark bit in lock word. 1972 ldr w4, [x3, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 1973 // If the bit is not zero, do the allocation. 1974 tbnz w4, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name 1975 // The read barrier slow path. Mark 1976 // the class. 1977 stp x0, x1, [sp, #-32]! // Save registers (x0, x1, x2, lr). 1978 stp x2, xLR, [sp, #16] 1979 mov x0, x3 // Pass the class as the first param. 1980 bl artReadBarrierMark 1981 mov x3, x0 // Get the (marked) class back. 1982 ldp x2, xLR, [sp, #16] 1983 ldp x0, x1, [sp], #32 // Restore registers. 1984 b .Ldo_allocation\name 1985.Lslow_path\name: 1986 // x0: uint32_t type_idx / mirror::Class* klass (if resolved) 1987 // x1: int32_t component_count 1988 // x2: ArtMethod* method 1989 // x3: Thread* self 1990 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1991 mov x3, xSELF // pass Thread::Current 1992 bl \entrypoint 1993 RESTORE_SAVE_REFS_ONLY_FRAME 1994 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1995END \name 1996.endm 1997 1998GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_region_tlab, artAllocArrayFromCodeRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH, 0 1999// TODO: art_quick_alloc_array_resolved_region_tlab seems to not get called. Investigate compiler. 2000GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, 1 2001 2002 /* 2003 * Called by managed code when the thread has been asked to suspend. 2004 */ 2005 .extern artTestSuspendFromCode 2006ENTRY art_quick_test_suspend 2007 SETUP_SAVE_EVERYTHING_FRAME // save callee saves for stack crawl 2008 mov x0, xSELF 2009 bl artTestSuspendFromCode // (Thread*) 2010 RESTORE_SAVE_EVERYTHING_FRAME 2011 ret 2012END art_quick_test_suspend 2013 2014ENTRY art_quick_implicit_suspend 2015 mov x0, xSELF 2016 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves for stack crawl 2017 bl artTestSuspendFromCode // (Thread*) 2018 RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN 2019END art_quick_implicit_suspend 2020 2021 /* 2022 * Called by managed code that is attempting to call a method on a proxy class. On entry 2023 * x0 holds the proxy method and x1 holds the receiver; The frame size of the invoked proxy 2024 * method agrees with a ref and args callee save frame. 2025 */ 2026 .extern artQuickProxyInvokeHandler 2027ENTRY art_quick_proxy_invoke_handler 2028 SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0 2029 mov x2, xSELF // pass Thread::Current 2030 mov x3, sp // pass SP 2031 bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP) 2032 ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET] 2033 cbnz x2, .Lexception_in_proxy // success if no exception is pending 2034 RESTORE_SAVE_REFS_AND_ARGS_FRAME // Restore frame 2035 fmov d0, x0 // Store result in d0 in case it was float or double 2036 ret // return on success 2037.Lexception_in_proxy: 2038 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2039 DELIVER_PENDING_EXCEPTION 2040END art_quick_proxy_invoke_handler 2041 2042 /* 2043 * Called to resolve an imt conflict. 2044 * x0 is the conflict ArtMethod. 2045 * xIP1 is a hidden argument that holds the target interface method's dex method index. 2046 * 2047 * Note that this stub writes to xIP0, xIP1, and x0. 2048 */ 2049 .extern artInvokeInterfaceTrampoline 2050ENTRY art_quick_imt_conflict_trampoline 2051 ldr xIP0, [sp, #0] // Load referrer 2052 ldr xIP0, [xIP0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_64] // Load dex cache methods array 2053 ldr xIP0, [xIP0, xIP1, lsl #POINTER_SIZE_SHIFT] // Load interface method 2054 ldr xIP1, [x0, #ART_METHOD_JNI_OFFSET_64] // Load ImtConflictTable 2055 ldr x0, [xIP1] // Load first entry in ImtConflictTable. 2056.Limt_table_iterate: 2057 cmp x0, xIP0 2058 // Branch if found. Benchmarks have shown doing a branch here is better. 2059 beq .Limt_table_found 2060 // If the entry is null, the interface method is not in the ImtConflictTable. 2061 cbz x0, .Lconflict_trampoline 2062 // Iterate over the entries of the ImtConflictTable. 2063 ldr x0, [xIP1, #(2 * __SIZEOF_POINTER__)]! 2064 b .Limt_table_iterate 2065.Limt_table_found: 2066 // We successfully hit an entry in the table. Load the target method 2067 // and jump to it. 2068 ldr x0, [xIP1, #__SIZEOF_POINTER__] 2069 ldr xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64] 2070 br xIP0 2071.Lconflict_trampoline: 2072 // Call the runtime stub to populate the ImtConflictTable and jump to the 2073 // resolved method. 2074 INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline 2075END art_quick_imt_conflict_trampoline 2076 2077ENTRY art_quick_resolution_trampoline 2078 SETUP_SAVE_REFS_AND_ARGS_FRAME 2079 mov x2, xSELF 2080 mov x3, sp 2081 bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP) 2082 cbz x0, 1f 2083 mov xIP0, x0 // Remember returned code pointer in xIP0. 2084 ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP. 2085 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2086 br xIP0 20871: 2088 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2089 DELIVER_PENDING_EXCEPTION 2090END art_quick_resolution_trampoline 2091 2092/* 2093 * Generic JNI frame layout: 2094 * 2095 * #-------------------# 2096 * | | 2097 * | caller method... | 2098 * #-------------------# <--- SP on entry 2099 * | Return X30/LR | 2100 * | X29/FP | callee save 2101 * | X28 | callee save 2102 * | X27 | callee save 2103 * | X26 | callee save 2104 * | X25 | callee save 2105 * | X24 | callee save 2106 * | X23 | callee save 2107 * | X22 | callee save 2108 * | X21 | callee save 2109 * | X20 | callee save 2110 * | X19 | callee save 2111 * | X7 | arg7 2112 * | X6 | arg6 2113 * | X5 | arg5 2114 * | X4 | arg4 2115 * | X3 | arg3 2116 * | X2 | arg2 2117 * | X1 | arg1 2118 * | D7 | float arg 8 2119 * | D6 | float arg 7 2120 * | D5 | float arg 6 2121 * | D4 | float arg 5 2122 * | D3 | float arg 4 2123 * | D2 | float arg 3 2124 * | D1 | float arg 2 2125 * | D0 | float arg 1 2126 * | Method* | <- X0 2127 * #-------------------# 2128 * | local ref cookie | // 4B 2129 * | handle scope size | // 4B 2130 * #-------------------# 2131 * | JNI Call Stack | 2132 * #-------------------# <--- SP on native call 2133 * | | 2134 * | Stack for Regs | The trampoline assembly will pop these values 2135 * | | into registers for native call 2136 * #-------------------# 2137 * | Native code ptr | 2138 * #-------------------# 2139 * | Free scratch | 2140 * #-------------------# 2141 * | Ptr to (1) | <--- SP 2142 * #-------------------# 2143 */ 2144 /* 2145 * Called to do a generic JNI down-call 2146 */ 2147ENTRY art_quick_generic_jni_trampoline 2148 SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0 2149 2150 // Save SP , so we can have static CFI info. 2151 mov x28, sp 2152 .cfi_def_cfa_register x28 2153 2154 // This looks the same, but is different: this will be updated to point to the bottom 2155 // of the frame when the handle scope is inserted. 2156 mov xFP, sp 2157 2158 mov xIP0, #5120 2159 sub sp, sp, xIP0 2160 2161 // prepare for artQuickGenericJniTrampoline call 2162 // (Thread*, SP) 2163 // x0 x1 <= C calling convention 2164 // xSELF xFP <= where they are 2165 2166 mov x0, xSELF // Thread* 2167 mov x1, xFP 2168 bl artQuickGenericJniTrampoline // (Thread*, sp) 2169 2170 // The C call will have registered the complete save-frame on success. 2171 // The result of the call is: 2172 // x0: pointer to native code, 0 on error. 2173 // x1: pointer to the bottom of the used area of the alloca, can restore stack till there. 2174 2175 // Check for error = 0. 2176 cbz x0, .Lexception_in_native 2177 2178 // Release part of the alloca. 2179 mov sp, x1 2180 2181 // Save the code pointer 2182 mov xIP0, x0 2183 2184 // Load parameters from frame into registers. 2185 // TODO Check with artQuickGenericJniTrampoline. 2186 // Also, check again APPCS64 - the stack arguments are interleaved. 2187 ldp x0, x1, [sp] 2188 ldp x2, x3, [sp, #16] 2189 ldp x4, x5, [sp, #32] 2190 ldp x6, x7, [sp, #48] 2191 2192 ldp d0, d1, [sp, #64] 2193 ldp d2, d3, [sp, #80] 2194 ldp d4, d5, [sp, #96] 2195 ldp d6, d7, [sp, #112] 2196 2197 add sp, sp, #128 2198 2199 blr xIP0 // native call. 2200 2201 // result sign extension is handled in C code 2202 // prepare for artQuickGenericJniEndTrampoline call 2203 // (Thread*, result, result_f) 2204 // x0 x1 x2 <= C calling convention 2205 mov x1, x0 // Result (from saved). 2206 mov x0, xSELF // Thread register. 2207 fmov x2, d0 // d0 will contain floating point result, but needs to go into x2 2208 2209 bl artQuickGenericJniEndTrampoline 2210 2211 // Pending exceptions possible. 2212 ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET] 2213 cbnz x2, .Lexception_in_native 2214 2215 // Tear down the alloca. 2216 mov sp, x28 2217 .cfi_def_cfa_register sp 2218 2219 // Tear down the callee-save frame. 2220 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2221 2222 // store into fpr, for when it's a fpr return... 2223 fmov d0, x0 2224 ret 2225 2226.Lexception_in_native: 2227 // Move to x1 then sp to please assembler. 2228 ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] 2229 mov sp, x1 2230 .cfi_def_cfa_register sp 2231 # This will create a new save-all frame, required by the runtime. 2232 DELIVER_PENDING_EXCEPTION 2233END art_quick_generic_jni_trampoline 2234 2235/* 2236 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those 2237 * of a quick call: 2238 * x0 = method being called/to bridge to. 2239 * x1..x7, d0..d7 = arguments to that method. 2240 */ 2241ENTRY art_quick_to_interpreter_bridge 2242 SETUP_SAVE_REFS_AND_ARGS_FRAME // Set up frame and save arguments. 2243 2244 // x0 will contain mirror::ArtMethod* method. 2245 mov x1, xSELF // How to get Thread::Current() ??? 2246 mov x2, sp 2247 2248 // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self, 2249 // mirror::ArtMethod** sp) 2250 bl artQuickToInterpreterBridge 2251 2252 RESTORE_SAVE_REFS_AND_ARGS_FRAME // TODO: no need to restore arguments in this case. 2253 2254 fmov d0, x0 2255 2256 RETURN_OR_DELIVER_PENDING_EXCEPTION 2257END art_quick_to_interpreter_bridge 2258 2259 2260// 2261// Instrumentation-related stubs 2262// 2263 .extern artInstrumentationMethodEntryFromCode 2264ENTRY art_quick_instrumentation_entry 2265 SETUP_SAVE_REFS_AND_ARGS_FRAME 2266 2267 mov x20, x0 // Preserve method reference in a callee-save. 2268 2269 mov x2, xSELF 2270 mov x3, xLR 2271 bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, LR) 2272 2273 mov xIP0, x0 // x0 = result of call. 2274 mov x0, x20 // Reload method reference. 2275 2276 RESTORE_SAVE_REFS_AND_ARGS_FRAME // Note: will restore xSELF 2277 adr xLR, art_quick_instrumentation_exit 2278 br xIP0 // Tail-call method with lr set to art_quick_instrumentation_exit. 2279END art_quick_instrumentation_entry 2280 2281 .extern artInstrumentationMethodExitFromCode 2282ENTRY art_quick_instrumentation_exit 2283 mov xLR, #0 // Clobber LR for later checks. 2284 2285 SETUP_SAVE_REFS_ONLY_FRAME 2286 2287 // We need to save x0 and d0. We could use a callee-save from SETUP_REF_ONLY, but then 2288 // we would need to fully restore it. As there are a lot of callee-save registers, it seems 2289 // easier to have an extra small stack area. 2290 2291 str x0, [sp, #-16]! // Save integer result. 2292 .cfi_adjust_cfa_offset 16 2293 str d0, [sp, #8] // Save floating-point result. 2294 2295 add x1, sp, #16 // Pass SP. 2296 mov x2, x0 // Pass integer result. 2297 fmov x3, d0 // Pass floating-point result. 2298 mov x0, xSELF // Pass Thread. 2299 bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res, fpr_res) 2300 2301 mov xIP0, x0 // Return address from instrumentation call. 2302 mov xLR, x1 // r1 is holding link register if we're to bounce to deoptimize 2303 2304 ldr d0, [sp, #8] // Restore floating-point result. 2305 ldr x0, [sp], #16 // Restore integer result, and drop stack area. 2306 .cfi_adjust_cfa_offset 16 2307 2308 POP_SAVE_REFS_ONLY_FRAME 2309 2310 br xIP0 // Tail-call out. 2311END art_quick_instrumentation_exit 2312 2313 /* 2314 * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization 2315 * will long jump to the upcall with a special exception of -1. 2316 */ 2317 .extern artDeoptimize 2318ENTRY art_quick_deoptimize 2319 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME 2320 mov x0, xSELF // Pass thread. 2321 bl artDeoptimize // artDeoptimize(Thread*) 2322 brk 0 2323END art_quick_deoptimize 2324 2325 /* 2326 * Compiled code has requested that we deoptimize into the interpreter. The deoptimization 2327 * will long jump to the upcall with a special exception of -1. 2328 */ 2329 .extern artDeoptimizeFromCompiledCode 2330ENTRY art_quick_deoptimize_from_compiled_code 2331 SETUP_SAVE_EVERYTHING_FRAME 2332 mov x0, xSELF // Pass thread. 2333 bl artDeoptimizeFromCompiledCode // artDeoptimizeFromCompiledCode(Thread*) 2334 brk 0 2335END art_quick_deoptimize_from_compiled_code 2336 2337 2338 /* 2339 * String's indexOf. 2340 * 2341 * TODO: Not very optimized. 2342 * On entry: 2343 * x0: string object (known non-null) 2344 * w1: char to match (known <= 0xFFFF) 2345 * w2: Starting offset in string data 2346 */ 2347ENTRY art_quick_indexof 2348 ldr w3, [x0, #MIRROR_STRING_COUNT_OFFSET] 2349 add x0, x0, #MIRROR_STRING_VALUE_OFFSET 2350 2351 /* Clamp start to [0..count] */ 2352 cmp w2, #0 2353 csel w2, wzr, w2, lt 2354 cmp w2, w3 2355 csel w2, w3, w2, gt 2356 2357 /* Save a copy to compute result */ 2358 mov x5, x0 2359 2360 /* Build pointer to start of data to compare and pre-bias */ 2361 add x0, x0, x2, lsl #1 2362 sub x0, x0, #2 2363 2364 /* Compute iteration count */ 2365 sub w2, w3, w2 2366 2367 /* 2368 * At this point we have: 2369 * x0: start of the data to test 2370 * w1: char to compare 2371 * w2: iteration count 2372 * x5: original start of string data 2373 */ 2374 2375 subs w2, w2, #4 2376 b.lt .Lindexof_remainder 2377 2378.Lindexof_loop4: 2379 ldrh w6, [x0, #2]! 2380 ldrh w7, [x0, #2]! 2381 ldrh wIP0, [x0, #2]! 2382 ldrh wIP1, [x0, #2]! 2383 cmp w6, w1 2384 b.eq .Lmatch_0 2385 cmp w7, w1 2386 b.eq .Lmatch_1 2387 cmp wIP0, w1 2388 b.eq .Lmatch_2 2389 cmp wIP1, w1 2390 b.eq .Lmatch_3 2391 subs w2, w2, #4 2392 b.ge .Lindexof_loop4 2393 2394.Lindexof_remainder: 2395 adds w2, w2, #4 2396 b.eq .Lindexof_nomatch 2397 2398.Lindexof_loop1: 2399 ldrh w6, [x0, #2]! 2400 cmp w6, w1 2401 b.eq .Lmatch_3 2402 subs w2, w2, #1 2403 b.ne .Lindexof_loop1 2404 2405.Lindexof_nomatch: 2406 mov x0, #-1 2407 ret 2408 2409.Lmatch_0: 2410 sub x0, x0, #6 2411 sub x0, x0, x5 2412 asr x0, x0, #1 2413 ret 2414.Lmatch_1: 2415 sub x0, x0, #4 2416 sub x0, x0, x5 2417 asr x0, x0, #1 2418 ret 2419.Lmatch_2: 2420 sub x0, x0, #2 2421 sub x0, x0, x5 2422 asr x0, x0, #1 2423 ret 2424.Lmatch_3: 2425 sub x0, x0, x5 2426 asr x0, x0, #1 2427 ret 2428END art_quick_indexof 2429 2430 /* 2431 * Create a function `name` calling the ReadBarrier::Mark routine, 2432 * getting its argument and returning its result through W register 2433 * `wreg` (corresponding to X register `xreg`), saving and restoring 2434 * all caller-save registers. 2435 * 2436 * If `wreg` is different from `w0`, the generated function follows a 2437 * non-standard runtime calling convention: 2438 * - register `wreg` is used to pass the (sole) argument of this 2439 * function (instead of W0); 2440 * - register `wreg` is used to return the result of this function 2441 * (instead of W0); 2442 * - W0 is treated like a normal (non-argument) caller-save register; 2443 * - everything else is the same as in the standard runtime calling 2444 * convention (e.g. standard callee-save registers are preserved). 2445 */ 2446.macro READ_BARRIER_MARK_REG name, wreg, xreg 2447ENTRY \name 2448 // Reference is null, no work to do at all. 2449 cbz \wreg, .Lret_rb_\name 2450 /* 2451 * Allocate 46 stack slots * 8 = 368 bytes: 2452 * - 20 slots for core registers X0-X19 2453 * - 24 slots for floating-point registers D0-D7 and D16-D31 2454 * - 1 slot for return address register XLR 2455 * - 1 padding slot for 16-byte stack alignment 2456 */ 2457 // Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler. 2458 ldr wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 2459 tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_path_rb_\name 2460 ret 2461.Lslow_path_rb_\name: 2462 // Save all potentially live caller-save core registers. 2463 SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368 2464 SAVE_TWO_REGS x2, x3, 16 2465 SAVE_TWO_REGS x4, x5, 32 2466 SAVE_TWO_REGS x6, x7, 48 2467 SAVE_TWO_REGS x8, x9, 64 2468 SAVE_TWO_REGS x10, x11, 80 2469 SAVE_TWO_REGS x12, x13, 96 2470 SAVE_TWO_REGS x14, x15, 112 2471 SAVE_TWO_REGS x16, x17, 128 2472 SAVE_TWO_REGS x18, x19, 144 2473 // Save all potentially live caller-save floating-point registers. 2474 stp d0, d1, [sp, #160] 2475 stp d2, d3, [sp, #176] 2476 stp d4, d5, [sp, #192] 2477 stp d6, d7, [sp, #208] 2478 stp d16, d17, [sp, #224] 2479 stp d18, d19, [sp, #240] 2480 stp d20, d21, [sp, #256] 2481 stp d22, d23, [sp, #272] 2482 stp d24, d25, [sp, #288] 2483 stp d26, d27, [sp, #304] 2484 stp d28, d29, [sp, #320] 2485 stp d30, d31, [sp, #336] 2486 // Save return address. 2487 // (sp + #352 is a padding slot) 2488 SAVE_REG xLR, 360 2489 2490 .ifnc \wreg, w0 2491 mov w0, \wreg // Pass arg1 - obj from `wreg` 2492 .endif 2493 bl artReadBarrierMark // artReadBarrierMark(obj) 2494 .ifnc \wreg, w0 2495 mov \wreg, w0 // Return result into `wreg` 2496 .endif 2497 2498 // Restore core regs, except `xreg`, as `wreg` is used to return the 2499 // result of this function (simply remove it from the stack instead). 2500 POP_REGS_NE x0, x1, 0, \xreg 2501 POP_REGS_NE x2, x3, 16, \xreg 2502 POP_REGS_NE x4, x5, 32, \xreg 2503 POP_REGS_NE x6, x7, 48, \xreg 2504 POP_REGS_NE x8, x9, 64, \xreg 2505 POP_REGS_NE x10, x11, 80, \xreg 2506 POP_REGS_NE x12, x13, 96, \xreg 2507 POP_REGS_NE x14, x15, 112, \xreg 2508 POP_REGS_NE x16, x17, 128, \xreg 2509 POP_REGS_NE x18, x19, 144, \xreg 2510 // Restore floating-point registers. 2511 ldp d0, d1, [sp, #160] 2512 ldp d2, d3, [sp, #176] 2513 ldp d4, d5, [sp, #192] 2514 ldp d6, d7, [sp, #208] 2515 ldp d16, d17, [sp, #224] 2516 ldp d18, d19, [sp, #240] 2517 ldp d20, d21, [sp, #256] 2518 ldp d22, d23, [sp, #272] 2519 ldp d24, d25, [sp, #288] 2520 ldp d26, d27, [sp, #304] 2521 ldp d28, d29, [sp, #320] 2522 ldp d30, d31, [sp, #336] 2523 // Restore return address and remove padding. 2524 RESTORE_REG xLR, 360 2525 add sp, sp, #368 2526 .cfi_adjust_cfa_offset -368 2527.Lret_rb_\name: 2528 ret 2529END \name 2530.endm 2531 2532READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg00, w0, x0 2533READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, w1, x1 2534READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, w2, x2 2535READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, w3, x3 2536READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, w4, x4 2537READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, w5, x5 2538READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, w6, x6 2539READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, w7, x7 2540READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, w8, x8 2541READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, w9, x9 2542READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, w10, x10 2543READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, w11, x11 2544READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, w12, x12 2545READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, w13, x13 2546READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, w14, x14 2547READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, w15, x15 2548// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, w16, x16 ip0 is blocked 2549READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, w17, x17 2550READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18 2551READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, w19, x19 2552READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, w20, x20 2553READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, w21, x21 2554READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, w22, x22 2555READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg23, w23, x23 2556READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg24, w24, x24 2557READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg25, w25, x25 2558READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg26, w26, x26 2559READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, w27, x27 2560READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, w28, x28 2561READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29 2562