quick_entrypoints_arm64.S revision 94ce9c2f41ea198f5fdcfc09c48b9984c95a9c61
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_arm64.S"

#include "arch/quick_alloc_entrypoints.S"


.macro INCREASE_FRAME frame_adjustment
    sub sp, sp, #(\frame_adjustment)
    .cfi_adjust_cfa_offset (\frame_adjustment)
.endm

.macro DECREASE_FRAME frame_adjustment
    add sp, sp, #(\frame_adjustment)
    .cfi_adjust_cfa_offset -(\frame_adjustment)
.endm

.macro SAVE_REG reg, offset
    str \reg, [sp, #(\offset)]
    .cfi_rel_offset \reg, (\offset)
.endm

.macro RESTORE_REG reg, offset
    ldr \reg, [sp, #(\offset)]
    .cfi_restore \reg
.endm

.macro SAVE_TWO_REGS reg1, reg2, offset
    stp \reg1, \reg2, [sp, #(\offset)]
    .cfi_rel_offset \reg1, (\offset)
    .cfi_rel_offset \reg2, (\offset) + 8
.endm

.macro RESTORE_TWO_REGS reg1, reg2, offset
    ldp \reg1, \reg2, [sp, #(\offset)]
    .cfi_restore \reg1
    .cfi_restore \reg2
.endm

.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
    stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
    .cfi_adjust_cfa_offset (\frame_adjustment)
    .cfi_rel_offset \reg1, 0
    .cfi_rel_offset \reg2, 8
.endm

.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
    ldp \reg1, \reg2, [sp], #(\frame_adjustment)
    .cfi_restore \reg1
    .cfi_restore \reg2
    .cfi_adjust_cfa_offset -(\frame_adjustment)
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
     */
.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]

    INCREASE_FRAME 176

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 176)
#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM64) size not as expected."
#endif

    // Stack alignment filler [sp, #8].
    // FP callee-saves.
    stp d8, d9,   [sp, #16]
    stp d10, d11, [sp, #32]
    stp d12, d13, [sp, #48]
    stp d14, d15, [sp, #64]

    // GP callee-saves
    SAVE_TWO_REGS x19, x20, 80
    SAVE_TWO_REGS x21, x22, 96
    SAVE_TWO_REGS x23, x24, 112
    SAVE_TWO_REGS x25, x26, 128
    SAVE_TWO_REGS x27, x28, 144
    SAVE_TWO_REGS x29, xLR, 160

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves].
    str xIP0, [sp]
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
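     * (This frame spills only the GP callee-saves x20-x29 and LR; unlike the
     * kSaveAllCalleeSaves frame above, the FP callee-saves d8-d15 are not stored.)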
     */
.macro SETUP_SAVE_REFS_ONLY_FRAME
    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsOnly];
    ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]

    INCREASE_FRAME 96

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_ONLY != 96)
#error "FRAME_SIZE_SAVE_REFS_ONLY(ARM64) size not as expected."
#endif

    // GP callee-saves.
    // x20 paired with ArtMethod* - see below.
    SAVE_TWO_REGS x21, x22, 16
    SAVE_TWO_REGS x23, x24, 32
    SAVE_TWO_REGS x25, x26, 48
    SAVE_TWO_REGS x27, x28, 64
    SAVE_TWO_REGS x29, xLR, 80

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsOnly].
    stp xIP0, x20, [sp]
    .cfi_rel_offset x20, 8

    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_SAVE_REFS_ONLY_FRAME
    // Callee-saves.
    RESTORE_REG x20, 8
    RESTORE_TWO_REGS x21, x22, 16
    RESTORE_TWO_REGS x23, x24, 32
    RESTORE_TWO_REGS x25, x26, 48
    RESTORE_TWO_REGS x27, x28, 64
    RESTORE_TWO_REGS x29, xLR, 80

    DECREASE_FRAME 96
.endm

.macro POP_SAVE_REFS_ONLY_FRAME
    DECREASE_FRAME 96
.endm

.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
    RESTORE_SAVE_REFS_ONLY_FRAME
    ret
.endm


.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
    INCREASE_FRAME 224

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 224)
#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM64) size not as expected."
#endif

    // Stack alignment filler [sp, #8].
    // FP args.
    stp d0, d1, [sp, #16]
    stp d2, d3, [sp, #32]
    stp d4, d5, [sp, #48]
    stp d6, d7, [sp, #64]

    // Core args.
    SAVE_TWO_REGS x1, x2, 80
    SAVE_TWO_REGS x3, x4, 96
    SAVE_TWO_REGS x5, x6, 112

    // x7, Callee-saves.
    SAVE_TWO_REGS x7, x20, 128
    SAVE_TWO_REGS x21, x22, 144
    SAVE_TWO_REGS x23, x24, 160
    SAVE_TWO_REGS x25, x26, 176
    SAVE_TWO_REGS x27, x28, 192

    // x29(callee-save) and LR.
    SAVE_TWO_REGS x29, xLR, 208

.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
     *
     * TODO This is probably too conservative - saving FP & LR.
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsAndArgs];
    ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]

    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL

    str xIP0, [sp]  // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsAndArgs].
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
    str x0, [sp, #0]  // Store ArtMethod* to bottom of stack.
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
    // FP args.
    ldp d0, d1, [sp, #16]
    ldp d2, d3, [sp, #32]
    ldp d4, d5, [sp, #48]
    ldp d6, d7, [sp, #64]

    // Core args.
    RESTORE_TWO_REGS x1, x2, 80
    RESTORE_TWO_REGS x3, x4, 96
    RESTORE_TWO_REGS x5, x6, 112

    // x7, Callee-saves.
    RESTORE_TWO_REGS x7, x20, 128
    RESTORE_TWO_REGS x21, x22, 144
    RESTORE_TWO_REGS x23, x24, 160
    RESTORE_TWO_REGS x25, x26, 176
    RESTORE_TWO_REGS x27, x28, 192

    // x29(callee-save) and LR.
    RESTORE_TWO_REGS x29, xLR, 208

    DECREASE_FRAME 224
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything)
     * when the SP has already been decremented by FRAME_SIZE_SAVE_EVERYTHING
     * and saving registers x29 and LR is handled elsewhere.
     */
.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 512)
#error "FRAME_SIZE_SAVE_EVERYTHING(ARM64) size not as expected."
#endif

    // Save FP registers.
    // For better performance, store d0 and d31 separately, so that all STPs are 16-byte aligned.
    str d0,       [sp, #8]
    stp d1, d2,   [sp, #16]
    stp d3, d4,   [sp, #32]
    stp d5, d6,   [sp, #48]
    stp d7, d8,   [sp, #64]
    stp d9, d10,  [sp, #80]
    stp d11, d12, [sp, #96]
    stp d13, d14, [sp, #112]
    stp d15, d16, [sp, #128]
    stp d17, d18, [sp, #144]
    stp d19, d20, [sp, #160]
    stp d21, d22, [sp, #176]
    stp d23, d24, [sp, #192]
    stp d25, d26, [sp, #208]
    stp d27, d28, [sp, #224]
    stp d29, d30, [sp, #240]
    str d31,      [sp, #256]

    // Save core registers.
    SAVE_REG x0, 264
    SAVE_TWO_REGS x1, x2, 272
    SAVE_TWO_REGS x3, x4, 288
    SAVE_TWO_REGS x5, x6, 304
    SAVE_TWO_REGS x7, x8, 320
    SAVE_TWO_REGS x9, x10, 336
    SAVE_TWO_REGS x11, x12, 352
    SAVE_TWO_REGS x13, x14, 368
    SAVE_TWO_REGS x15, x16, 384
    SAVE_TWO_REGS x17, x18, 400
    SAVE_TWO_REGS x19, x20, 416
    SAVE_TWO_REGS x21, x22, 432
    SAVE_TWO_REGS x23, x24, 448
    SAVE_TWO_REGS x25, x26, 464
    SAVE_TWO_REGS x27, x28, 480

    // art::Runtime** xIP0 = &art::Runtime::instance_
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything];
    ldr xIP0, [xIP0, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET]

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveEverything].
    str xIP0, [sp]
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything)
     */
.macro SETUP_SAVE_EVERYTHING_FRAME
    INCREASE_FRAME 512
    SAVE_TWO_REGS x29, xLR, 496
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
.endm

.macro RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
    // Restore FP registers.
    // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned.
    ldr d0,       [sp, #8]
    ldp d1, d2,   [sp, #16]
    ldp d3, d4,   [sp, #32]
    ldp d5, d6,   [sp, #48]
    ldp d7, d8,   [sp, #64]
    ldp d9, d10,  [sp, #80]
    ldp d11, d12, [sp, #96]
    ldp d13, d14, [sp, #112]
    ldp d15, d16, [sp, #128]
    ldp d17, d18, [sp, #144]
    ldp d19, d20, [sp, #160]
    ldp d21, d22, [sp, #176]
    ldp d23, d24, [sp, #192]
    ldp d25, d26, [sp, #208]
    ldp d27, d28, [sp, #224]
    ldp d29, d30, [sp, #240]
    ldr d31,      [sp, #256]

    // Restore core registers.
    RESTORE_TWO_REGS x1, x2, 272
    RESTORE_TWO_REGS x3, x4, 288
    RESTORE_TWO_REGS x5, x6, 304
    RESTORE_TWO_REGS x7, x8, 320
    RESTORE_TWO_REGS x9, x10, 336
    RESTORE_TWO_REGS x11, x12, 352
    RESTORE_TWO_REGS x13, x14, 368
    RESTORE_TWO_REGS x15, x16, 384
    RESTORE_TWO_REGS x17, x18, 400
    RESTORE_TWO_REGS x19, x20, 416
    RESTORE_TWO_REGS x21, x22, 432
    RESTORE_TWO_REGS x23, x24, 448
    RESTORE_TWO_REGS x25, x26, 464
    RESTORE_TWO_REGS x27, x28, 480
    RESTORE_TWO_REGS x29, xLR, 496

    DECREASE_FRAME 512
.endm

.macro RESTORE_SAVE_EVERYTHING_FRAME
    RESTORE_REG x0, 264
    RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
.endm

.macro RETURN_IF_RESULT_IS_ZERO
    cbnz x0, 1f  // result non-zero branch over
    ret          // return
1:
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO
    cbz x0, 1f   // result zero branch over
    ret          // return
1:
.endm

    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
     */
.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
    mov x0, xSELF

    // Point of no return.
    bl artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*)
    brk 0                                  // Unreached
.endm

    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_.
     */
.macro DELIVER_PENDING_EXCEPTION
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    DELIVER_PENDING_EXCEPTION_FRAME_READY
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
    ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET]  // Get exception field.
    cbnz \reg, 1f
    ret
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm

// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
.endm

.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
    cbnz w0, 1f  // result non-zero branch over
    ret          // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context
    mov x0, xSELF                      // pass Thread::Current
    bl \cxx_name                       // \cxx_name(Thread*)
    brk 0
END \c_name
.endm

.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_EVERYTHING_FRAME  // save all registers as basis for long jump context
    mov x0, xSELF                // pass Thread::Current
    bl \cxx_name                 // \cxx_name(Thread*)
    brk 0
END \c_name
.endm

.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context.
    mov x1, xSELF                      // pass Thread::Current.
    bl \cxx_name                       // \cxx_name(arg, Thread*).
    brk 0
END \c_name
.endm

.macro TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_EVERYTHING_FRAME  // save all registers as basis for long jump context
    mov x2, xSELF                // pass Thread::Current
    bl \cxx_name                 // \cxx_name(arg1, arg2, Thread*)
    brk 0
END \c_name
.endm

    /*
     * Called by managed code, saves callee saves and then calls artThrowException
     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode

    /*
     * Called by managed code to create and deliver a NullPointerException.
     */
NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode

    /*
     * Call installed by a signal handler to create and deliver a NullPointerException.
     */
    .extern art_quick_throw_null_pointer_exception_from_signal
ENTRY art_quick_throw_null_pointer_exception_from_signal
    // The fault handler pushes the gc map address, i.e. "return address", to stack
    // and passes the fault address in LR. So we need to set up the CFI info accordingly.
    .cfi_def_cfa_offset __SIZEOF_POINTER__
    .cfi_rel_offset lr, 0
    // Save all registers as basis for long jump context.
    INCREASE_FRAME (FRAME_SIZE_SAVE_EVERYTHING - __SIZEOF_POINTER__)
    SAVE_REG x29, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)  // LR already saved.
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
    mov x0, lr                                 // pass the fault address stored in LR by the fault handler.
    mov x1, xSELF                              // pass Thread::Current.
    bl artThrowNullPointerExceptionFromSignal  // (arg, Thread*).
    brk 0
END art_quick_throw_null_pointer_exception_from_signal

    /*
     * Called by managed code to create and deliver an ArithmeticException.
     */
NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
     * index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode

    /*
     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
     * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
     * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/x1.
     *
     * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
     * of the target Method* in x0 and method->code_ in x1.
     *
     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     *
     * Adapted from ARM32 code.
     *
     * Clobbers xIP0.
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_SAVE_REFS_AND_ARGS_FRAME  // save callee saves in case allocation triggers GC
    // Helper signature is always
    // (method_idx, *this_object, *caller_method, *self, sp)

    mov x2, xSELF  // pass Thread::Current
    mov x3, sp
    bl \cxx_name   // (method_idx, this, Thread*, SP)
    mov xIP0, x1   // save Method*->code_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    cbz x0, 1f     // did we find the target? if not go to exception delivery
    br xIP0        // tail call to target
1:
    DELIVER_PENDING_EXCEPTION
.endm
.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck


.macro INVOKE_STUB_CREATE_FRAME

SAVE_SIZE=15*8   // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+8


    mov x9, sp                           // Save stack pointer.
    .cfi_register sp,x9

    add x10, x2, # SAVE_SIZE_AND_METHOD  // calculate size of frame.
    sub x10, sp, x10                     // Calculate SP position - saves + ArtMethod* + args
    and x10, x10, # ~0xf                 // Enforce 16 byte stack alignment.
    mov sp, x10                          // Set new SP.

    sub x10, x9, #SAVE_SIZE              // Calculate new FP (later). Done here as we must move SP
    .cfi_def_cfa_register x10            // before this.
    .cfi_adjust_cfa_offset SAVE_SIZE

    str x28, [x10, #112]
    .cfi_rel_offset x28, 112

    stp x26, x27, [x10, #96]
    .cfi_rel_offset x26, 96
    .cfi_rel_offset x27, 104

    stp x24, x25, [x10, #80]
    .cfi_rel_offset x24, 80
    .cfi_rel_offset x25, 88

    stp x22, x23, [x10, #64]
    .cfi_rel_offset x22, 64
    .cfi_rel_offset x23, 72

    stp x20, x21, [x10, #48]
    .cfi_rel_offset x20, 48
    .cfi_rel_offset x21, 56

    stp x9, x19, [x10, #32]   // Save old stack pointer and x19.
    .cfi_rel_offset sp, 32
    .cfi_rel_offset x19, 40

    stp x4, x5, [x10, #16]    // Save result and shorty addresses.
    .cfi_rel_offset x4, 16
    .cfi_rel_offset x5, 24

    stp xFP, xLR, [x10]       // Store LR & FP.
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8

    mov xFP, x10              // Use xFP now, as it's callee-saved.
    .cfi_def_cfa_register x29
    mov xSELF, x3             // Move thread pointer into SELF register.

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X1 - source address
    // W2 - args length
    // X9 - destination address.
    // W10 - temporary
    add x9, sp, #8            // Destination address is bottom of stack + null.

    // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler
    // does not have unique-id variables.
1:
    cmp w2, #0
    beq 2f
    sub w2, w2, #4            // Need 65536 bytes of range.
    ldr w10, [x1, x2]
    str w10, [x9, x2]

    b 1b

2:
    // Store null into ArtMethod* at bottom of frame.
    str xzr, [sp]
.endm

.macro INVOKE_STUB_CALL_AND_RETURN

    // load method-> METHOD_QUICK_CODE_OFFSET
    ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    // Branch to method.
    blr x9

    // Restore return value address and shorty address.
    ldp x4, x5, [xFP, #16]
    .cfi_restore x4
    .cfi_restore x5

    ldr x28, [xFP, #112]
    .cfi_restore x28

    ldp x26, x27, [xFP, #96]
    .cfi_restore x26
    .cfi_restore x27

    ldp x24, x25, [xFP, #80]
    .cfi_restore x24
    .cfi_restore x25

    ldp x22, x23, [xFP, #64]
    .cfi_restore x22
    .cfi_restore x23

    ldp x20, x21, [xFP, #48]
    .cfi_restore x20
    .cfi_restore x21

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x5]

    // Check the return type and store the correct register into the jvalue in memory.
    // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables.

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq 3f

    // Is it a double?
    cmp w10, #'D'
    bne 1f
    str d0, [x4]
    b 3f

1:  // Is it a float?
    cmp w10, #'F'
    bne 2f
    str s0, [x4]
    b 3f

2:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x4]

3:  // Finish up.
    ldp x2, x19, [xFP, #32]   // Restore stack pointer and x19.
    .cfi_restore x19
    mov sp, x2
    .cfi_restore sp

    ldp xFP, xLR, [xFP]       // Restore old frame pointer and link register.
    .cfi_restore x29
    .cfi_restore x30

    ret

.endm


/*
 *  extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
 *                                       uint32_t *args,      x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char *shorty);       x5
 *  +----------------------+
 *  |                      |
 *  |  C/C++ frame         |
 *  |       LR''           |
 *  |       FP''           | <- SP'
 *  +----------------------+
 *  +----------------------+
 *  |        x28           | <- TODO: Remove callee-saves.
 *  |         :            |
 *  |        x19           |
 *  |        SP'           |
 *  |        X5            |
 *  |        X4            |        Saved registers
 *  |        LR'           |
 *  |        FP'           | <- FP
 *  +----------------------+
 *  | uint32_t out[n-1]    |
 *  |    :      :          |        Outs
 *  | uint32_t out[0]      |
 *  | ArtMethod*           | <- SP  value=null
 *  +----------------------+
 *
 * Outgoing registers:
 *  x0    - Method*
 *  x1-x7 - integer parameters.
 *  d0-d7 - Floating point parameters.
 *  xSELF = self
 *  SP = & of ArtMethod*
 *  x1 = "this" pointer.
 *
 */
ENTRY art_quick_invoke_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW2
    adr x12, .LstoreX2
    adr x13, .LstoreS0
    adr x14, .LstoreD0

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1     // Load shorty address, plus one to skip return value.
    ldr w1, [x9], #4    // Load "this" parameter, and increment arg pointer.

    // Loop to fill registers.
.LfillRegisters:
    ldrb w17, [x10], #1      // Load next character in signature, and increment.
    cbz w17, .LcallFunction  // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'            // is this a float?
    bne .LisDouble

    cmp x15, # 8*12          // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x13, x15        // Calculate subroutine to jump to.
    br x17

.LisDouble:
    cmp w17, #'D'            // is this a double?
    bne .LisLong

    cmp x15, # 8*12          // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x14, x15        // Calculate subroutine to jump to.
    br x17

.LisLong:
    cmp w17, #'J'            // is this a long?
    bne .LisOther

    cmp x8, # 6*12           // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x12, x8         // Calculate subroutine to jump to.
    br x17

.LisOther:                   // Everything else takes one vReg.
    cmp x8, # 6*12           // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x11, x8         // Calculate subroutine to jump to.
    br x17

.Ladvance4:
    add x9, x9, #4
    b .LfillRegisters

.Ladvance8:
    add x9, x9, #8
    b .LfillRegisters

// Macro for loading a parameter into a register.
//  counter - the register with offset into these tables
//  size - the size of the register - 4 or 8 bytes.
//  register - the name of the register to be loaded.
.macro LOADREG counter size register return
    ldr \register , [x9], #\size
    add \counter, \counter, 12
    b \return
.endm

// Store ints.
.LstoreW2:
    LOADREG x8 4 w2 .LfillRegisters
    LOADREG x8 4 w3 .LfillRegisters
    LOADREG x8 4 w4 .LfillRegisters
    LOADREG x8 4 w5 .LfillRegisters
    LOADREG x8 4 w6 .LfillRegisters
    LOADREG x8 4 w7 .LfillRegisters

// Store longs.
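// (Each LOADREG expansion is three 4-byte instructions, i.e. 12 bytes, which is why the x8/x15
// offsets advance in steps of 12 and the "registers full" checks above compare against multiples
// of 12.)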
.LstoreX2:
    LOADREG x8 8 x2 .LfillRegisters
    LOADREG x8 8 x3 .LfillRegisters
    LOADREG x8 8 x4 .LfillRegisters
    LOADREG x8 8 x5 .LfillRegisters
    LOADREG x8 8 x6 .LfillRegisters
    LOADREG x8 8 x7 .LfillRegisters

// Store singles.
.LstoreS0:
    LOADREG x15 4 s0 .LfillRegisters
    LOADREG x15 4 s1 .LfillRegisters
    LOADREG x15 4 s2 .LfillRegisters
    LOADREG x15 4 s3 .LfillRegisters
    LOADREG x15 4 s4 .LfillRegisters
    LOADREG x15 4 s5 .LfillRegisters
    LOADREG x15 4 s6 .LfillRegisters
    LOADREG x15 4 s7 .LfillRegisters

// Store doubles.
.LstoreD0:
    LOADREG x15 8 d0 .LfillRegisters
    LOADREG x15 8 d1 .LfillRegisters
    LOADREG x15 8 d2 .LfillRegisters
    LOADREG x15 8 d3 .LfillRegisters
    LOADREG x15 8 d4 .LfillRegisters
    LOADREG x15 8 d5 .LfillRegisters
    LOADREG x15 8 d6 .LfillRegisters
    LOADREG x15 8 d7 .LfillRegisters


.LcallFunction:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_stub

/*  extern"C"
 *     void art_quick_invoke_static_stub(ArtMethod *method,   x0
 *                                       uint32_t *args,      x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char *shorty);       x5
 */
ENTRY art_quick_invoke_static_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW1_2
    adr x12, .LstoreX1_2
    adr x13, .LstoreS0_2
    adr x14, .LstoreD0_2

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1   // Load shorty address, plus one to skip return value.

    // Loop to fill registers.
.LfillRegisters2:
    ldrb w17, [x10], #1       // Load next character in signature, and increment.
    cbz w17, .LcallFunction2  // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'             // is this a float?
    bne .LisDouble2

    cmp x15, # 8*12           // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x13, x15         // Calculate subroutine to jump to.
    br x17

.LisDouble2:
    cmp w17, #'D'             // is this a double?
    bne .LisLong2

    cmp x15, # 8*12           // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x14, x15         // Calculate subroutine to jump to.
    br x17

.LisLong2:
    cmp w17, #'J'             // is this a long?
    bne .LisOther2

    cmp x8, # 7*12            // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x12, x8          // Calculate subroutine to jump to.
    br x17

.LisOther2:                   // Everything else takes one vReg.
    cmp x8, # 7*12            // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x11, x8          // Calculate subroutine to jump to.
    br x17

.Ladvance4_2:
    add x9, x9, #4
    b .LfillRegisters2

.Ladvance8_2:
    add x9, x9, #8
    b .LfillRegisters2

// Store ints.
.LstoreW1_2:
    LOADREG x8 4 w1 .LfillRegisters2
    LOADREG x8 4 w2 .LfillRegisters2
    LOADREG x8 4 w3 .LfillRegisters2
    LOADREG x8 4 w4 .LfillRegisters2
    LOADREG x8 4 w5 .LfillRegisters2
    LOADREG x8 4 w6 .LfillRegisters2
    LOADREG x8 4 w7 .LfillRegisters2

// Store longs.
.LstoreX1_2:
    LOADREG x8 8 x1 .LfillRegisters2
    LOADREG x8 8 x2 .LfillRegisters2
    LOADREG x8 8 x3 .LfillRegisters2
    LOADREG x8 8 x4 .LfillRegisters2
    LOADREG x8 8 x5 .LfillRegisters2
    LOADREG x8 8 x6 .LfillRegisters2
    LOADREG x8 8 x7 .LfillRegisters2

// Store singles.
.LstoreS0_2:
    LOADREG x15 4 s0 .LfillRegisters2
    LOADREG x15 4 s1 .LfillRegisters2
    LOADREG x15 4 s2 .LfillRegisters2
    LOADREG x15 4 s3 .LfillRegisters2
    LOADREG x15 4 s4 .LfillRegisters2
    LOADREG x15 4 s5 .LfillRegisters2
    LOADREG x15 4 s6 .LfillRegisters2
    LOADREG x15 4 s7 .LfillRegisters2

// Store doubles.
.LstoreD0_2:
    LOADREG x15 8 d0 .LfillRegisters2
    LOADREG x15 8 d1 .LfillRegisters2
    LOADREG x15 8 d2 .LfillRegisters2
    LOADREG x15 8 d3 .LfillRegisters2
    LOADREG x15 8 d4 .LfillRegisters2
    LOADREG x15 8 d5 .LfillRegisters2
    LOADREG x15 8 d6 .LfillRegisters2
    LOADREG x15 8 d7 .LfillRegisters2


.LcallFunction2:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_static_stub



/*  extern"C" void art_quick_osr_stub(void** stack,                x0
 *                                    size_t stack_size_in_bytes,  x1
 *                                    const uint8_t* native_pc,    x2
 *                                    JValue *result,              x3
 *                                    char *shorty,                x4
 *                                    Thread *self)                x5
 */
ENTRY art_quick_osr_stub
SAVE_SIZE=15*8   // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
    mov x9, sp               // Save stack pointer.
    .cfi_register sp,x9

    sub x10, sp, # SAVE_SIZE
    and x10, x10, # ~0xf     // Enforce 16 byte stack alignment.
    mov sp, x10              // Set new SP.

    str x28, [sp, #112]
    stp x26, x27, [sp, #96]
    stp x24, x25, [sp, #80]
    stp x22, x23, [sp, #64]
    stp x20, x21, [sp, #48]
    stp x9, x19, [sp, #32]   // Save old stack pointer and x19.
    stp x3, x4, [sp, #16]    // Save result and shorty addresses.
    stp xFP, xLR, [sp]       // Store LR & FP.
    mov xSELF, x5            // Move thread pointer into SELF register.

    sub sp, sp, #16
    str xzr, [sp]            // Store null for ArtMethod* slot
    // Branch to stub.
    bl .Losr_entry
    add sp, sp, #16

    // Restore return value address and shorty address.
    ldp x3, x4, [sp, #16]
    ldr x28, [sp, #112]
    ldp x26, x27, [sp, #96]
    ldp x24, x25, [sp, #80]
    ldp x22, x23, [sp, #64]
    ldp x20, x21, [sp, #48]

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x4]

    // Check the return type and store the correct register into the jvalue in memory.

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq .Losr_exit

    // Is it a double?
    cmp w10, #'D'
    bne .Lno_double
    str d0, [x3]
    b .Losr_exit

.Lno_double:  // Is it a float?
    cmp w10, #'F'
    bne .Lno_float
    str s0, [x3]
    b .Losr_exit

.Lno_float:   // Just store x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x3]

.Losr_exit:   // Finish up.
    ldp x2, x19, [sp, #32]   // Restore stack pointer and x19.
    ldp xFP, xLR, [sp]       // Restore old frame pointer and link register.
    mov sp, x2
    ret

.Losr_entry:
    // Update stack pointer for the callee
    sub sp, sp, x1

    // Update link register slot expected by the callee.
    sub w1, w1, #8
    str lr, [sp, x1]

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X0 - source address
    // W1 - args length
    // SP - destination address.
    // W10 - temporary
.Losr_loop_entry:
    cmp w1, #0
    beq .Losr_loop_exit
    sub w1, w1, #4
    ldr w10, [x0, x1]
    str w10, [sp, x1]
    b .Losr_loop_entry

.Losr_loop_exit:
    // Branch to the OSR entry point.
    br x2

END art_quick_osr_stub

    /*
     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_
     */

ENTRY art_quick_do_long_jump
    // Load FPRs
    ldp d0, d1, [x1], #16
    ldp d2, d3, [x1], #16
    ldp d4, d5, [x1], #16
    ldp d6, d7, [x1], #16
    ldp d8, d9, [x1], #16
    ldp d10, d11, [x1], #16
    ldp d12, d13, [x1], #16
    ldp d14, d15, [x1], #16
    ldp d16, d17, [x1], #16
    ldp d18, d19, [x1], #16
    ldp d20, d21, [x1], #16
    ldp d22, d23, [x1], #16
    ldp d24, d25, [x1], #16
    ldp d26, d27, [x1], #16
    ldp d28, d29, [x1], #16
    ldp d30, d31, [x1]

    // Load GPRs
    // TODO: lots of those are smashed, could optimize.
    add x0, x0, #30*8
    ldp x30, x1, [x0], #-16   // LR & SP
    ldp x28, x29, [x0], #-16
    ldp x26, x27, [x0], #-16
    ldp x24, x25, [x0], #-16
    ldp x22, x23, [x0], #-16
    ldp x20, x21, [x0], #-16
    ldp x18, x19, [x0], #-16
    ldp x16, x17, [x0], #-16
    ldp x14, x15, [x0], #-16
    ldp x12, x13, [x0], #-16
    ldp x10, x11, [x0], #-16
    ldp x8, x9, [x0], #-16
    ldp x6, x7, [x0], #-16
    ldp x4, x5, [x0], #-16
    ldp x2, x3, [x0], #-16
    mov sp, x1

    // Need to load PC, it's at the end (after the space for the unused XZR). Use x1.
    ldr x1, [x0, #33*8]
    // And the value of x0.
    ldr x0, [x0]

    br x1
END art_quick_do_long_jump

    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
     * possibly null object to lock.
     *
     * Derived from arm32 code.
     */
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    cbz w0, .Lslow_lock
    add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
.Lretry_lock:
    ldr w2, [xSELF, #THREAD_ID_OFFSET]           // TODO: Can the thread ID really change during the loop?
    ldxr w1, [x4]
    mov x3, x1
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
    cbnz w3, .Lnot_unlocked                      // already thin locked
    // unlocked case - x1: original lock word that's zero except for the read barrier bits.
    orr x2, x1, x2                               // x2 holds thread id with count of 0 with preserved read barrier bits
    stxr w3, w2, [x4]
    cbnz w3, .Llock_stxr_fail                    // store failed, retry
    dmb ishld                                    // full (LoadLoad|LoadStore) memory barrier
    ret
.Lnot_unlocked:  // x1: original lock word
    lsr w3, w1, LOCK_WORD_STATE_SHIFT
    cbnz w3, .Lslow_lock                         // if either of the top two bits are set, go slow path
    eor w2, w1, w2                               // lock_word.ThreadId() ^ self->ThreadId()
    uxth w2, w2                                  // zero top 16 bits
    cbnz w2, .Lslow_lock                         // lock word and self thread id's match -> recursive lock
                                                 // else contention, go to slow path
    mov x3, x1                                   // copy the lock word to check count overflow.
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits.
    add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE   // increment count in lock word placing in w2 to check overflow
    lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT        // if the first gc state bit is set, we overflowed.
    cbnz w3, .Lslow_lock                         // if we overflow the count go slow path
    add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE   // increment count for real
    stxr w3, w2, [x4]
    cbnz w3, .Llock_stxr_fail                    // store failed, retry
    ret
.Llock_stxr_fail:
    b .Lretry_lock                               // retry
.Lslow_lock:
    SETUP_SAVE_REFS_ONLY_FRAME                   // save callee saves in case we block
    mov x1, xSELF                                // pass Thread::Current
    bl artLockObjectFromCode                     // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object

ENTRY art_quick_lock_object_no_inline
    SETUP_SAVE_REFS_ONLY_FRAME                   // save callee saves in case we block
    mov x1, xSELF                                // pass Thread::Current
    bl artLockObjectFromCode                     // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline

    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
     * x0 holds the possibly null object to unlock.
     *
     * Derived from arm32 code.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    cbz x0, .Lslow_unlock
    add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
.Lretry_unlock:
#ifndef USE_READ_BARRIER
    ldr w1, [x4]
#else
    ldxr w1, [x4]                                // Need to use atomic instructions for read barrier
#endif
    lsr w2, w1, LOCK_WORD_STATE_SHIFT
    cbnz w2, .Lslow_unlock                       // if either of the top two bits are set, go slow path
    ldr w2, [xSELF, #THREAD_ID_OFFSET]
    mov x3, x1                                   // copy lock word to check thread id equality
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
    eor w3, w3, w2                               // lock_word.ThreadId() ^ self->ThreadId()
    uxth w3, w3                                  // zero top 16 bits
    cbnz w3, .Lslow_unlock                       // do lock word and self thread id's match?
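    // Thin lock held by this thread: either decrement the recursion count or transition to
    // unlocked below.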
    mov x3, x1                                   // copy lock word to detect transition to unlocked
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
    cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
    bpl .Lrecursive_thin_unlock
    // transition to unlocked
    mov x3, x1
    and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED  // w3: zero except for the preserved read barrier bits
    dmb ish                                      // full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
    str w3, [x4]
#else
    stxr w2, w3, [x4]                            // Need to use atomic instructions for read barrier
    cbnz w2, .Lunlock_stxr_fail                  // store failed, retry
#endif
    ret
.Lrecursive_thin_unlock:  // w1: original lock word
    sub w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE   // decrement count
#ifndef USE_READ_BARRIER
    str w1, [x4]
#else
    stxr w2, w1, [x4]                            // Need to use atomic instructions for read barrier
    cbnz w2, .Lunlock_stxr_fail                  // store failed, retry
#endif
    ret
.Lunlock_stxr_fail:
    b .Lretry_unlock                             // retry
.Lslow_unlock:
    SETUP_SAVE_REFS_ONLY_FRAME                   // save callee saves in case exception allocation triggers GC
    mov x1, xSELF                                // pass Thread::Current
    bl artUnlockObjectFromCode                   // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object

ENTRY art_quick_unlock_object_no_inline
    SETUP_SAVE_REFS_ONLY_FRAME                   // save callee saves in case exception allocation triggers GC
    mov x1, xSELF                                // pass Thread::Current
    bl artUnlockObjectFromCode                   // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline

    /*
     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
     * artThrowClassCastException.
     */
    .extern artThrowClassCastException
ENTRY art_quick_check_cast
    // Store arguments and link register
    // Stack needs to be 16B aligned on calls.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
    SAVE_REG xLR, 24

    // Call runtime code
    bl artIsAssignableFromCode

    // Check for exception
    cbz x0, .Lthrow_class_cast_exception

    // Restore and return
    .cfi_remember_state
    RESTORE_REG xLR, 24
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
    ret
    .cfi_restore_state                           // Reset unwind info so following code unwinds.

.Lthrow_class_cast_exception:
    // Restore
    RESTORE_REG xLR, 24
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME            // save all registers as basis for long jump context
    mov x2, xSELF                                // pass Thread::Current
    bl artThrowClassCastException                // (Class*, Class*, Thread*)
    brk 0                                        // We should not return here...
END art_quick_check_cast

// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
.macro POP_REG_NE xReg, offset, xExclude
    .ifnc \xReg, \xExclude
        ldr \xReg, [sp, #\offset]                // restore xReg
        .cfi_restore \xReg
    .endif
.endm

// Restore xReg1's value from [sp, #offset] if xReg1 is not the same as xExclude.
// Restore xReg2's value from [sp, #(offset + 8)] if xReg2 is not the same as xExclude.
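// Note: the CFI state for both registers is restored unconditionally, even when a load is skipped.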
.macro POP_REGS_NE xReg1, xReg2, offset, xExclude
    .ifc \xReg1, \xExclude
        ldr \xReg2, [sp, #(\offset + 8)]         // restore xReg2
    .else
        .ifc \xReg2, \xExclude
            ldr \xReg1, [sp, #\offset]           // restore xReg1
        .else
            ldp \xReg1, \xReg2, [sp, #\offset]   // restore xReg1 and xReg2
        .endif
    .endif
    .cfi_restore \xReg1
    .cfi_restore \xReg2
.endm

    /*
     * Macro to insert read barrier, only used in art_quick_aput_obj.
     * xDest, wDest and xObj are registers, offset is a defined literal such as
     * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
     * name mismatch between instructions. This macro uses the lower 32b of register when possible.
     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
     */
.macro READ_BARRIER xDest, wDest, xObj, xTemp, wTemp, offset, number
#ifdef USE_READ_BARRIER
#ifdef USE_BAKER_READ_BARRIER
    ldr \wTemp, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbnz \wTemp, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, .Lrb_slowpath\number
    // False dependency to avoid needing load/load fence.
    add \xObj, \xObj, \xTemp, lsr #32
    ldr \wDest, [\xObj, #\offset]                // Heap reference = 32b. This also zero-extends to \xDest.
    UNPOISON_HEAP_REF \wDest
    b .Lrb_exit\number
#endif
.Lrb_slowpath\number:
    // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 48
    SAVE_TWO_REGS x2, x3, 16
    SAVE_TWO_REGS x4, xLR, 32

    // mov x0, \xRef                             // pass ref in x0 (no-op for now since parameter ref is unused)
    .ifnc \xObj, x1
        mov x1, \xObj                            // pass xObj
    .endif
    mov w2, #\offset                             // pass offset
    bl artReadBarrierSlow                        // artReadBarrierSlow(ref, xObj, offset)
    // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning.
    .ifnc \wDest, w0
        mov \wDest, w0                           // save return value in wDest
    .endif

    // Conditionally restore saved registers
    POP_REG_NE x0, 0, \xDest
    POP_REG_NE x1, 8, \xDest
    POP_REG_NE x2, 16, \xDest
    POP_REG_NE x3, 24, \xDest
    POP_REG_NE x4, 32, \xDest
    RESTORE_REG xLR, 40
    DECREASE_FRAME 48
.Lrb_exit\number:
#else
    ldr \wDest, [\xObj, #\offset]                // Heap reference = 32b. This also zero-extends to \xDest.
    UNPOISON_HEAP_REF \wDest
#endif  // USE_READ_BARRIER
.endm

    /*
     * Entry from managed code for array put operations of objects where the value being stored
     * needs to be checked for compatibility.
     * x0 = array, x1 = index, x2 = value
     *
     * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We
     * assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by
     * using index-zero-extension in load/stores.
     *
     * Temporaries: x3, x4
     * TODO: x4 OK? ip seems wrong here.
     */
ENTRY art_quick_aput_obj_with_null_and_bound_check
    tst x0, x0
    bne art_quick_aput_obj_with_bound_check
    b art_quick_throw_null_pointer_exception
END art_quick_aput_obj_with_null_and_bound_check

ENTRY art_quick_aput_obj_with_bound_check
    ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]
    cmp w3, w1
    bhi art_quick_aput_obj
    mov x0, x1
    mov x1, x3
    b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check

#ifdef USE_READ_BARRIER
    .extern artReadBarrierSlow
#endif
ENTRY art_quick_aput_obj
    cbz x2, .Ldo_aput_null
    READ_BARRIER x3, w3, x0, x3, w3, MIRROR_OBJECT_CLASS_OFFSET, 0          // Heap reference = 32b
                                                                            // This also zero-extends to x3
    READ_BARRIER x3, w3, x3, x4, w4, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 1  // Heap reference = 32b
                                                                            // This also zero-extends to x3
    READ_BARRIER x4, w4, x2, x4, w4, MIRROR_OBJECT_CLASS_OFFSET, 2          // Heap reference = 32b
                                                                            // This also zero-extends to x4
    cmp w3, w4                       // value's type == array's component type - trivial assignability
    bne .Lcheck_assignability
.Ldo_aput:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                     // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]         // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
.Ldo_aput_null:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                     // "Compress" = do nothing
    str w2, [x3, x1, lsl #2]         // Heap reference = 32b
    ret
.Lcheck_assignability:
    // Store arguments and link register
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
    SAVE_TWO_REGS x2, xLR, 16

    // Call runtime code
    mov x0, x3                       // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    mov x1, x4                       // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    bl artIsAssignableFromCode

    // Check for exception
    cbz x0, .Lthrow_array_store_exception

    // Restore
    .cfi_remember_state
    RESTORE_TWO_REGS x2, xLR, 16
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
                                     // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]         // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
    .cfi_restore_state               // Reset unwind info so following code unwinds.
.Lthrow_array_store_exception:
    RESTORE_TWO_REGS x2, xLR, 16
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    mov x1, x2                       // Pass value.
    mov x2, xSELF                    // Pass Thread::Current.
    bl artThrowArrayStoreException   // (Object*, Object*, Thread*).
    brk 0                            // Unreached.
END art_quick_aput_obj

// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME       // save callee saves in case of GC
    mov x1, xSELF                    // pass Thread::Current
    bl \entrypoint                   // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
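// These downcall macros are instantiated further below, e.g.
//   ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// wraps the C++ entrypoint in a kSaveRefsOnly frame and applies the given return macro afterwards.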
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME       // save callee saves in case of GC
    mov x2, xSELF                    // pass Thread::Current
    bl \entrypoint                   // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME       // save callee saves in case of GC
    mov x3, xSELF                    // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME       // save callee saves in case of GC
    mov x4, xSELF                    // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
    DELIVER_PENDING_EXCEPTION
END \name
.endm

// Macros taking advantage of code similarities for downcalls with referrer.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME       // save callee saves in case of GC
    ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
    mov x2, xSELF                    // pass Thread::Current
    bl \entrypoint                   // (uint32_t type_idx, Method* method, Thread*, SP)
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME       // save callee saves in case of GC
    ldr x2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
    mov x3, xSELF                    // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME       // save callee saves in case of GC
    ldr x3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
    mov x4, xSELF                    // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    \return
END \name
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    cbz w0, 1f                       // result zero branch over
    ret                              // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

    /*
     * Entry from managed code when static storage is uninitialized; this stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

// This is separated out as the argument order is different.
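// The 64-bit value to store already arrives in x2, so only the referrer (x1) and Thread* (x3)
// need to be set up before the call.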
    .extern artSet64StaticFromCode
ENTRY art_quick_set64_static
    SETUP_SAVE_REFS_ONLY_FRAME                // save callee saves in case of GC
    ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY]  // Load referrer
                                              // x2 contains the parameter
    mov x3, xSELF                             // pass Thread::Current
    bl artSet64StaticFromCode
    RESTORE_SAVE_REFS_ONLY_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static

    /*
     * Entry from managed code to resolve a string, this stub will
     * check the dex cache for a matching string (the fast path), and if not found,
     * it will allocate a String and deliver an exception on error.
     * On success the String is returned. x0 holds the string index.
     */

ENTRY art_quick_resolve_string
    SAVE_TWO_REGS_INCREASE_FRAME x29, xLR, 2 * __SIZEOF_POINTER__
    ldr x29, [sp, #(2 * __SIZEOF_POINTER__)]                      // load referrer
    ldr w29, [x29, #ART_METHOD_DECLARING_CLASS_OFFSET]            // load declaring class
    ldr x29, [x29, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET]     // load string dex cache
    ubfx lr, x0, #0, #STRING_DEX_CACHE_HASH_BITS                  // get masked string index into LR
    ldr x29, [x29, lr, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT]  // load dex cache pair into x29
    cmp x0, x29, lsr #32                                          // compare against upper 32 bits
    bne .Lart_quick_resolve_string_slow_path
    ubfx x0, x29, #0, #32                                         // extract lower 32 bits into x0
#ifdef USE_READ_BARRIER
    // Most common case: GC is not marking.
    ldr w29, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
    cbnz x29, .Lart_quick_resolve_string_marking
.Lart_quick_resolve_string_no_rb:
#endif
    .cfi_remember_state
    RESTORE_TWO_REGS_DECREASE_FRAME x29, xLR, 2 * __SIZEOF_POINTER__
    ret
    .cfi_restore_state
    .cfi_def_cfa_offset 16                    // workaround for clang bug: 31975598

#ifdef USE_READ_BARRIER
// GC is marking case, need to check the mark bit.
.Lart_quick_resolve_string_marking:
    ldr x29, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbnz x29, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb
    .cfi_remember_state
    RESTORE_TWO_REGS_DECREASE_FRAME x29, xLR, 2 * __SIZEOF_POINTER__
    // Note: art_quick_read_barrier_mark_reg00 clobbers IP0 but the .Lslow_rb_* does not.
    b .Lslow_rb_art_quick_read_barrier_mark_reg00  // Get the marked string back.
    .cfi_restore_state
    .cfi_def_cfa_offset 16                    // workaround for clang bug: 31975598
#endif

// Slow path case, the index did not match.
.Lart_quick_resolve_string_slow_path:
    INCREASE_FRAME (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR  // save callee saves in case of GC
    mov x1, xSELF                             // pass Thread::Current
    bl artResolveStringFromCode               // (int32_t string_idx, Thread* self)
    cbz w0, 1f                                // If result is null, deliver the OOME.
    .cfi_remember_state
    RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
    ret                                       // return
    .cfi_restore_state
    .cfi_def_cfa_offset FRAME_SIZE_SAVE_EVERYTHING  // workaround for clang bug: 31975598
1:
    DELIVER_PENDING_EXCEPTION_FRAME_READY
END art_quick_resolve_string

// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
1706// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm 1707// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) 1708// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB) 1709GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) 1710// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB) implemented in asm 1711// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB) 1712GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) 1713GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB) 1714GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) 1715GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB) 1716GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB) 1717GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB) 1718 1719// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). 1720ENTRY art_quick_alloc_object_rosalloc 1721 // Fast path rosalloc allocation. 1722 // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current 1723 // x2-x7: free. 1724 ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 1725 // Load the class (x2) 1726 ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 1727 cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class 1728 ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local 1729 // allocation stack has room. 1730 // ldp won't work due to large offset. 1731 ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET] 1732 cmp x3, x4 1733 bhs .Lart_quick_alloc_object_rosalloc_slow_path 1734 ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3) 1735 cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread 1736 // local allocation. Also does the 1737 // finalizable and initialization 1738 // checks. 1739 bhs .Lart_quick_alloc_object_rosalloc_slow_path 1740 // Compute the rosalloc bracket index 1741 // from the size. Since the size is 1742 // already aligned we can combine the 1743 // two shifts together. 1744 add x4, xSELF, x3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT) 1745 // Subtract pointer size since ther 1746 // are no runs for 0 byte allocations 1747 // and the size is already aligned. 1748 ldr x4, [x4, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)] 1749 // Load the free list head (x3). This 1750 // will be the return val. 1751 ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] 1752 cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path 1753 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. 1754 ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head 1755 // and update the list head with the 1756 // next pointer. 1757 str x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] 1758 // Store the class pointer in the 1759 // header. This also overwrites the 1760 // next pointer. The offsets are 1761 // asserted to match. 1762#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET 1763#error "Class pointer needs to overwrite next pointer." 
1764#endif 1765 POISON_HEAP_REF w2 1766 str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET] 1767 // Fence. This is "ish" not "ishst" so 1768 // that it also ensures ordering of 1769 // the object size load with respect 1770 // to later accesses to the class 1771 // object. Alternatively we could use 1772 // "ishst" if we use load-acquire for 1773 // the class status load. 1774 // Needs to be done before pushing on 1775 // allocation since Heap::VisitObjects 1776 // relies on seeing the class pointer. 1777 // b/28790624 1778 dmb ish 1779 // Push the new object onto the thread 1780 // local allocation stack and 1781 // increment the thread local 1782 // allocation stack top. 1783 ldr x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] 1784 str w3, [x1], #COMPRESSED_REFERENCE_SIZE // (Increment x1 as a side effect.) 1785 str x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] 1786 // Decrement the size of the free list 1787 ldr w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)] 1788 sub x1, x1, #1 1789 // TODO: consider combining this store 1790 // and the list head store above using 1791 // strd. 1792 str w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)] 1793 1794 mov x0, x3 // Set the return value and return. 1795 ret 1796.Lart_quick_alloc_object_rosalloc_slow_path: 1797 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 1798 mov x2, xSELF // pass Thread::Current 1799 bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*) 1800 RESTORE_SAVE_REFS_ONLY_FRAME 1801 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1802END art_quick_alloc_object_rosalloc 1803 1804 1805// The common fast path code for art_quick_alloc_array_region_tlab. 1806.macro ALLOC_ARRAY_TLAB_FAST_PATH slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2 1807 // Check null class 1808 cbz \wClass, \slowPathLabel 1809 ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED \slowPathLabel, \xClass, \wClass, \xCount, \wCount, \xTemp0, \wTemp0, \xTemp1, \wTemp1, \xTemp2, \wTemp2 1810.endm 1811 1812// The common fast path code for art_quick_alloc_array_region_tlab. 1813.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2 1814 // Array classes are never finalizable or uninitialized, no need to check. 1815 ldr \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type 1816 UNPOISON_HEAP_REF \wTemp0 1817 ldr \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET] 1818 lsr \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16 1819 // bits. 1820 // xCount is holding a 32 bit value, 1821 // it can not overflow. 1822 lsl \xTemp1, \xCount, \xTemp0 // Calculate data size 1823 // Add array data offset and alignment. 1824 add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) 1825#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4 1826#error Long array data offset must be 4 greater than int array data offset. 1827#endif 1828 1829 add \xTemp0, \xTemp0, #1 // Add 4 to the length only if the 1830 // component size shift is 3 1831 // (for 64 bit alignment). 1832 and \xTemp0, \xTemp0, #4 1833 add \xTemp1, \xTemp1, \xTemp0 1834 and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignemnt mask 1835 // (addr + 7) & ~7. The mask must 1836 // be 64 bits to keep high bits in 1837 // case of overflow. 
1838 // Negative sized arrays are handled here since xCount holds a zero extended 32 bit value. 1839 // Negative ints become large 64 bit unsigned ints which will always be larger than max signed 1840 // 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int. 1841 cmp \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD // Possibly a large object, go slow 1842 bhs \slowPathLabel // path. 1843 1844 ldr \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Check tlab for space, note that 1845 // we use (end - begin) to handle 1846 // negative size arrays. It is 1847 // assumed that a negative size will 1848 // always be greater unsigned than 1849 // region size. 1850 ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET] 1851 sub \xTemp2, \xTemp2, \xTemp0 1852 cmp \xTemp1, \xTemp2 1853 bhi \slowPathLabel 1854 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. 1855 // Move old thread_local_pos to x0 1856 // for the return value. 1857 mov x0, \xTemp0 1858 add \xTemp0, \xTemp0, \xTemp1 1859 str \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. 1860 ldr \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. 1861 add \xTemp0, \xTemp0, #1 1862 str \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] 1863 POISON_HEAP_REF \wClass 1864 str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. 1865 str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length. 1866 // Fence. 1867 dmb ishst 1868 ret 1869.endm 1870 1871// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. 1872// 1873// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current 1874// x3-x7: free. 1875// Need to preserve x0 and x1 to the slow path. 1876.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel 1877 cbz x2, \slowPathLabel // Check null class 1878 ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel 1879.endm 1880 1881// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as 1882// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED. 1883.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel 1884 ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel 1885.endm 1886 1887.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel 1888 ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET] 1889 ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET] 1890 ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7). 1891 add x6, x4, x7 // Add object size to tlab pos. 1892 cmp x6, x5 // Check if it fits, overflow works 1893 // since the tlab pos and end are 32 1894 // bit values. 1895 bhi \slowPathLabel 1896 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. 1897 mov x0, x4 1898 str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. 1899 ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. 1900 add x5, x5, #1 1901 str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] 1902 POISON_HEAP_REF w2 1903 str w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. 1904 // Fence. This is "ish" not "ishst" so 1905 // that the code after this allocation 1906 // site will see the right values in 1907 // the fields of the class. 1908 // Alternatively we could use "ishst" 1909 // if we use load-acquire for the 1910 // object size load.) 1911 dmb ish 1912 ret 1913.endm 1914 1915// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). 
1916ENTRY art_quick_alloc_object_tlab 1917 // Fast path tlab allocation. 1918 // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current 1919 // x2-x7: free. 1920#if defined(USE_READ_BARRIER) 1921 mvn x0, xzr // Read barrier not supported here. 1922 ret // Return -1. 1923#endif 1924 ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 1925 // Load the class (x2) 1926 ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 1927 ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path 1928.Lart_quick_alloc_object_tlab_slow_path: 1929 SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC. 1930 mov x2, xSELF // Pass Thread::Current. 1931 bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*) 1932 RESTORE_SAVE_REFS_ONLY_FRAME 1933 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1934END art_quick_alloc_object_tlab 1935 1936// The common code for art_quick_alloc_object_*region_tlab 1937.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier 1938ENTRY \name 1939 // Fast path region tlab allocation. 1940 // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current 1941 // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index. 1942 // x2-x7: free. 1943#if !defined(USE_READ_BARRIER) 1944 mvn x0, xzr // Read barrier must be enabled here. 1945 ret // Return -1. 1946#endif 1947.if \is_resolved 1948 mov x2, x0 // class is actually stored in x0 already 1949.else 1950 ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 1951 // Load the class (x2) 1952 ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 1953 // If the class is null, go slow path. The check is required to read the lock word. 1954 cbz w2, .Lslow_path\name 1955.endif 1956.if \read_barrier 1957 // Most common case: GC is not marking. 1958 ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET] 1959 cbnz x3, .Lmarking\name 1960.endif 1961.Ldo_allocation\name: 1962 \fast_path .Lslow_path\name 1963.Lmarking\name: 1964.if \read_barrier 1965 // GC is marking, check the lock word of the class for the mark bit. 1966 // Class is not null, check mark bit in lock word. 1967 ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 1968 // If the bit is not zero, do the allocation. 1969 tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name 1970 // The read barrier slow path. Mark 1971 // the class. 1972 SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 // Save registers (x0, x1, lr). 1973 SAVE_REG xLR, 24 // Align sp by 16 bytes. 1974 mov x0, x2 // Pass the class as the first param. 1975 bl artReadBarrierMark 1976 mov x2, x0 // Get the (marked) class back. 1977 RESTORE_REG xLR, 24 1978 RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 // Restore registers. 1979 b .Ldo_allocation\name 1980.endif 1981.Lslow_path\name: 1982 SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC. 1983 mov x2, xSELF // Pass Thread::Current. 1984 bl \entrypoint // (uint32_t type_idx, Method* method, Thread*) 1985 RESTORE_SAVE_REFS_ONLY_FRAME 1986 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 1987END \name 1988.endm 1989 1990// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_TLAB. 
1991GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1 1992// No read barrier for the resolved or initialized cases since the caller is responsible for the 1993// read barrier due to the to-space invariant. 1994GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0 1995GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0 1996 1997// TODO: We could use this macro for the normal tlab allocator too. 1998 1999// The common code for art_quick_alloc_array_*region_tlab 2000.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved 2001ENTRY \name 2002 // Fast path array allocation for region tlab allocation. 2003 // x0: uint32_t type_idx 2004 // x1: int32_t component_count 2005 // x2: ArtMethod* method 2006 // x3-x7: free. 2007#if !defined(USE_READ_BARRIER) 2008 mvn x0, xzr // Read barrier must be enabled here. 2009 ret // Return -1. 2010#endif 2011.if \is_resolved 2012 mov x3, x0 2013 // If already resolved, class is stored in x0 2014.else 2015 ldr x3, [x2, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array 2016 // Load the class (x2) 2017 ldr w3, [x3, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] 2018.endif 2019 // Most common case: GC is not marking. 2020 ldr w4, [xSELF, #THREAD_IS_GC_MARKING_OFFSET] 2021 cbnz x4, .Lmarking\name 2022.Ldo_allocation\name: 2023 \fast_path .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6 2024.Lmarking\name: 2025 // GC is marking, check the lock word of the class for the mark bit. 2026 // If the class is null, go slow path. The check is required to read the lock word. 2027 cbz w3, .Lslow_path\name 2028 // Class is not null, check mark bit in lock word. 2029 ldr w4, [x3, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 2030 // If the bit is not zero, do the allocation. 2031 tbnz w4, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name 2032 // The read barrier slow path. Mark 2033 // the class. 2034 stp x0, x1, [sp, #-32]! // Save registers (x0, x1, x2, lr). 2035 stp x2, xLR, [sp, #16] 2036 mov x0, x3 // Pass the class as the first param. 2037 bl artReadBarrierMark 2038 mov x3, x0 // Get the (marked) class back. 2039 ldp x2, xLR, [sp, #16] 2040 ldp x0, x1, [sp], #32 // Restore registers. 2041 b .Ldo_allocation\name 2042.Lslow_path\name: 2043 // x0: uint32_t type_idx / mirror::Class* klass (if resolved) 2044 // x1: int32_t component_count 2045 // x2: ArtMethod* method 2046 // x3: Thread* self 2047 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC 2048 mov x3, xSELF // pass Thread::Current 2049 bl \entrypoint 2050 RESTORE_SAVE_REFS_ONLY_FRAME 2051 RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 2052END \name 2053.endm 2054 2055GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_region_tlab, artAllocArrayFromCodeRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH, 0 2056// TODO: art_quick_alloc_array_resolved_region_tlab seems to not get called. Investigate compiler. 2057GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, 1 2058 2059 /* 2060 * Called by managed code when the thread has been asked to suspend. 
2061 */ 2062 .extern artTestSuspendFromCode 2063ENTRY art_quick_test_suspend 2064 SETUP_SAVE_EVERYTHING_FRAME // save callee saves for stack crawl 2065 mov x0, xSELF 2066 bl artTestSuspendFromCode // (Thread*) 2067 RESTORE_SAVE_EVERYTHING_FRAME 2068 ret 2069END art_quick_test_suspend 2070 2071ENTRY art_quick_implicit_suspend 2072 mov x0, xSELF 2073 SETUP_SAVE_REFS_ONLY_FRAME // save callee saves for stack crawl 2074 bl artTestSuspendFromCode // (Thread*) 2075 RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN 2076END art_quick_implicit_suspend 2077 2078 /* 2079 * Called by managed code that is attempting to call a method on a proxy class. On entry 2080 * x0 holds the proxy method and x1 holds the receiver; The frame size of the invoked proxy 2081 * method agrees with a ref and args callee save frame. 2082 */ 2083 .extern artQuickProxyInvokeHandler 2084ENTRY art_quick_proxy_invoke_handler 2085 SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0 2086 mov x2, xSELF // pass Thread::Current 2087 mov x3, sp // pass SP 2088 bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP) 2089 ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET] 2090 cbnz x2, .Lexception_in_proxy // success if no exception is pending 2091 RESTORE_SAVE_REFS_AND_ARGS_FRAME // Restore frame 2092 fmov d0, x0 // Store result in d0 in case it was float or double 2093 ret // return on success 2094.Lexception_in_proxy: 2095 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2096 DELIVER_PENDING_EXCEPTION 2097END art_quick_proxy_invoke_handler 2098 2099 /* 2100 * Called to resolve an imt conflict. 2101 * x0 is the conflict ArtMethod. 2102 * xIP1 is a hidden argument that holds the target interface method's dex method index. 2103 * 2104 * Note that this stub writes to xIP0, xIP1, and x0. 2105 */ 2106 .extern artInvokeInterfaceTrampoline 2107ENTRY art_quick_imt_conflict_trampoline 2108 ldr xIP0, [sp, #0] // Load referrer 2109 ldr xIP0, [xIP0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_64] // Load dex cache methods array 2110 ldr xIP0, [xIP0, xIP1, lsl #POINTER_SIZE_SHIFT] // Load interface method 2111 ldr xIP1, [x0, #ART_METHOD_JNI_OFFSET_64] // Load ImtConflictTable 2112 ldr x0, [xIP1] // Load first entry in ImtConflictTable. 2113.Limt_table_iterate: 2114 cmp x0, xIP0 2115 // Branch if found. Benchmarks have shown doing a branch here is better. 2116 beq .Limt_table_found 2117 // If the entry is null, the interface method is not in the ImtConflictTable. 2118 cbz x0, .Lconflict_trampoline 2119 // Iterate over the entries of the ImtConflictTable. 2120 ldr x0, [xIP1, #(2 * __SIZEOF_POINTER__)]! 2121 b .Limt_table_iterate 2122.Limt_table_found: 2123 // We successfully hit an entry in the table. Load the target method 2124 // and jump to it. 2125 ldr x0, [xIP1, #__SIZEOF_POINTER__] 2126 ldr xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64] 2127 br xIP0 2128.Lconflict_trampoline: 2129 // Call the runtime stub to populate the ImtConflictTable and jump to the 2130 // resolved method. 2131 INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline 2132END art_quick_imt_conflict_trampoline 2133 2134ENTRY art_quick_resolution_trampoline 2135 SETUP_SAVE_REFS_AND_ARGS_FRAME 2136 mov x2, xSELF 2137 mov x3, sp 2138 bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP) 2139 cbz x0, 1f 2140 mov xIP0, x0 // Remember returned code pointer in xIP0. 2141 ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP. 
2142 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2143 br xIP0 21441: 2145 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2146 DELIVER_PENDING_EXCEPTION 2147END art_quick_resolution_trampoline 2148 2149/* 2150 * Generic JNI frame layout: 2151 * 2152 * #-------------------# 2153 * | | 2154 * | caller method... | 2155 * #-------------------# <--- SP on entry 2156 * | Return X30/LR | 2157 * | X29/FP | callee save 2158 * | X28 | callee save 2159 * | X27 | callee save 2160 * | X26 | callee save 2161 * | X25 | callee save 2162 * | X24 | callee save 2163 * | X23 | callee save 2164 * | X22 | callee save 2165 * | X21 | callee save 2166 * | X20 | callee save 2167 * | X19 | callee save 2168 * | X7 | arg7 2169 * | X6 | arg6 2170 * | X5 | arg5 2171 * | X4 | arg4 2172 * | X3 | arg3 2173 * | X2 | arg2 2174 * | X1 | arg1 2175 * | D7 | float arg 8 2176 * | D6 | float arg 7 2177 * | D5 | float arg 6 2178 * | D4 | float arg 5 2179 * | D3 | float arg 4 2180 * | D2 | float arg 3 2181 * | D1 | float arg 2 2182 * | D0 | float arg 1 2183 * | Method* | <- X0 2184 * #-------------------# 2185 * | local ref cookie | // 4B 2186 * | handle scope size | // 4B 2187 * #-------------------# 2188 * | JNI Call Stack | 2189 * #-------------------# <--- SP on native call 2190 * | | 2191 * | Stack for Regs | The trampoline assembly will pop these values 2192 * | | into registers for native call 2193 * #-------------------# 2194 * | Native code ptr | 2195 * #-------------------# 2196 * | Free scratch | 2197 * #-------------------# 2198 * | Ptr to (1) | <--- SP 2199 * #-------------------# 2200 */ 2201 /* 2202 * Called to do a generic JNI down-call 2203 */ 2204ENTRY art_quick_generic_jni_trampoline 2205 SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0 2206 2207 // Save SP , so we can have static CFI info. 2208 mov x28, sp 2209 .cfi_def_cfa_register x28 2210 2211 // This looks the same, but is different: this will be updated to point to the bottom 2212 // of the frame when the handle scope is inserted. 2213 mov xFP, sp 2214 2215 mov xIP0, #5120 2216 sub sp, sp, xIP0 2217 2218 // prepare for artQuickGenericJniTrampoline call 2219 // (Thread*, SP) 2220 // x0 x1 <= C calling convention 2221 // xSELF xFP <= where they are 2222 2223 mov x0, xSELF // Thread* 2224 mov x1, xFP 2225 bl artQuickGenericJniTrampoline // (Thread*, sp) 2226 2227 // The C call will have registered the complete save-frame on success. 2228 // The result of the call is: 2229 // x0: pointer to native code, 0 on error. 2230 // x1: pointer to the bottom of the used area of the alloca, can restore stack till there. 2231 2232 // Check for error = 0. 2233 cbz x0, .Lexception_in_native 2234 2235 // Release part of the alloca. 2236 mov sp, x1 2237 2238 // Save the code pointer 2239 mov xIP0, x0 2240 2241 // Load parameters from frame into registers. 2242 // TODO Check with artQuickGenericJniTrampoline. 2243 // Also, check again APPCS64 - the stack arguments are interleaved. 2244 ldp x0, x1, [sp] 2245 ldp x2, x3, [sp, #16] 2246 ldp x4, x5, [sp, #32] 2247 ldp x6, x7, [sp, #48] 2248 2249 ldp d0, d1, [sp, #64] 2250 ldp d2, d3, [sp, #80] 2251 ldp d4, d5, [sp, #96] 2252 ldp d6, d7, [sp, #112] 2253 2254 add sp, sp, #128 2255 2256 blr xIP0 // native call. 2257 2258 // result sign extension is handled in C code 2259 // prepare for artQuickGenericJniEndTrampoline call 2260 // (Thread*, result, result_f) 2261 // x0 x1 x2 <= C calling convention 2262 mov x1, x0 // Result (from saved). 2263 mov x0, xSELF // Thread register. 
2264 fmov x2, d0 // d0 will contain floating point result, but needs to go into x2 2265 2266 bl artQuickGenericJniEndTrampoline 2267 2268 // Pending exceptions possible. 2269 ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET] 2270 cbnz x2, .Lexception_in_native 2271 2272 // Tear down the alloca. 2273 mov sp, x28 2274 .cfi_def_cfa_register sp 2275 2276 // Tear down the callee-save frame. 2277 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2278 2279 // store into fpr, for when it's a fpr return... 2280 fmov d0, x0 2281 ret 2282 2283.Lexception_in_native: 2284 // Move to x1 then sp to please assembler. 2285 ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] 2286 mov sp, x1 2287 .cfi_def_cfa_register sp 2288 # This will create a new save-all frame, required by the runtime. 2289 DELIVER_PENDING_EXCEPTION 2290END art_quick_generic_jni_trampoline 2291 2292/* 2293 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those 2294 * of a quick call: 2295 * x0 = method being called/to bridge to. 2296 * x1..x7, d0..d7 = arguments to that method. 2297 */ 2298ENTRY art_quick_to_interpreter_bridge 2299 SETUP_SAVE_REFS_AND_ARGS_FRAME // Set up frame and save arguments. 2300 2301 // x0 will contain mirror::ArtMethod* method. 2302 mov x1, xSELF // How to get Thread::Current() ??? 2303 mov x2, sp 2304 2305 // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self, 2306 // mirror::ArtMethod** sp) 2307 bl artQuickToInterpreterBridge 2308 2309 RESTORE_SAVE_REFS_AND_ARGS_FRAME // TODO: no need to restore arguments in this case. 2310 2311 fmov d0, x0 2312 2313 RETURN_OR_DELIVER_PENDING_EXCEPTION 2314END art_quick_to_interpreter_bridge 2315 2316 2317// 2318// Instrumentation-related stubs 2319// 2320 .extern artInstrumentationMethodEntryFromCode 2321ENTRY art_quick_instrumentation_entry 2322 SETUP_SAVE_REFS_AND_ARGS_FRAME 2323 2324 mov x20, x0 // Preserve method reference in a callee-save. 2325 2326 mov x2, xSELF 2327 mov x3, xLR 2328 bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, LR) 2329 2330 mov xIP0, x0 // x0 = result of call. 2331 mov x0, x20 // Reload method reference. 2332 2333 RESTORE_SAVE_REFS_AND_ARGS_FRAME // Note: will restore xSELF 2334 adr xLR, art_quick_instrumentation_exit 2335 br xIP0 // Tail-call method with lr set to art_quick_instrumentation_exit. 2336END art_quick_instrumentation_entry 2337 2338 .extern artInstrumentationMethodExitFromCode 2339ENTRY art_quick_instrumentation_exit 2340 mov xLR, #0 // Clobber LR for later checks. 2341 2342 SETUP_SAVE_REFS_ONLY_FRAME 2343 2344 // We need to save x0 and d0. We could use a callee-save from SETUP_REF_ONLY, but then 2345 // we would need to fully restore it. As there are a lot of callee-save registers, it seems 2346 // easier to have an extra small stack area. 2347 2348 str x0, [sp, #-16]! // Save integer result. 2349 .cfi_adjust_cfa_offset 16 2350 str d0, [sp, #8] // Save floating-point result. 2351 2352 add x1, sp, #16 // Pass SP. 2353 mov x2, x0 // Pass integer result. 2354 fmov x3, d0 // Pass floating-point result. 2355 mov x0, xSELF // Pass Thread. 2356 bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res, fpr_res) 2357 2358 mov xIP0, x0 // Return address from instrumentation call. 2359 mov xLR, x1 // r1 is holding link register if we're to bounce to deoptimize 2360 2361 ldr d0, [sp, #8] // Restore floating-point result. 2362 ldr x0, [sp], #16 // Restore integer result, and drop stack area. 
2363 .cfi_adjust_cfa_offset 16 2364 2365 POP_SAVE_REFS_ONLY_FRAME 2366 2367 br xIP0 // Tail-call out. 2368END art_quick_instrumentation_exit 2369 2370 /* 2371 * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization 2372 * will long jump to the upcall with a special exception of -1. 2373 */ 2374 .extern artDeoptimize 2375ENTRY art_quick_deoptimize 2376 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME 2377 mov x0, xSELF // Pass thread. 2378 bl artDeoptimize // artDeoptimize(Thread*) 2379 brk 0 2380END art_quick_deoptimize 2381 2382 /* 2383 * Compiled code has requested that we deoptimize into the interpreter. The deoptimization 2384 * will long jump to the upcall with a special exception of -1. 2385 */ 2386 .extern artDeoptimizeFromCompiledCode 2387ENTRY art_quick_deoptimize_from_compiled_code 2388 SETUP_SAVE_EVERYTHING_FRAME 2389 mov x0, xSELF // Pass thread. 2390 bl artDeoptimizeFromCompiledCode // artDeoptimizeFromCompiledCode(Thread*) 2391 brk 0 2392END art_quick_deoptimize_from_compiled_code 2393 2394 2395 /* 2396 * String's indexOf. 2397 * 2398 * TODO: Not very optimized. 2399 * On entry: 2400 * x0: string object (known non-null) 2401 * w1: char to match (known <= 0xFFFF) 2402 * w2: Starting offset in string data 2403 */ 2404ENTRY art_quick_indexof 2405 ldr w3, [x0, #MIRROR_STRING_COUNT_OFFSET] 2406 add x0, x0, #MIRROR_STRING_VALUE_OFFSET 2407#if (STRING_COMPRESSION_FEATURE) 2408 /* w4 holds count (with flag) and w3 holds actual length */ 2409 mov w4, w3 2410 and w3, w3, #2147483647 2411#endif 2412 /* Clamp start to [0..count] */ 2413 cmp w2, #0 2414 csel w2, wzr, w2, lt 2415 cmp w2, w3 2416 csel w2, w3, w2, gt 2417 2418 /* Save a copy to compute result */ 2419 mov x5, x0 2420 2421#if (STRING_COMPRESSION_FEATURE) 2422 tbnz w4, #31, .Lstring_indexof_compressed 2423#endif 2424 /* Build pointer to start of data to compare and pre-bias */ 2425 add x0, x0, x2, lsl #1 2426 sub x0, x0, #2 2427 /* Compute iteration count */ 2428 sub w2, w3, w2 2429 2430 /* 2431 * At this point we have: 2432 * x0: start of the data to test 2433 * w1: char to compare 2434 * w2: iteration count 2435 * x5: original start of string data 2436 */ 2437 2438 subs w2, w2, #4 2439 b.lt .Lindexof_remainder 2440 2441.Lindexof_loop4: 2442 ldrh w6, [x0, #2]! 2443 ldrh w7, [x0, #2]! 2444 ldrh wIP0, [x0, #2]! 2445 ldrh wIP1, [x0, #2]! 2446 cmp w6, w1 2447 b.eq .Lmatch_0 2448 cmp w7, w1 2449 b.eq .Lmatch_1 2450 cmp wIP0, w1 2451 b.eq .Lmatch_2 2452 cmp wIP1, w1 2453 b.eq .Lmatch_3 2454 subs w2, w2, #4 2455 b.ge .Lindexof_loop4 2456 2457.Lindexof_remainder: 2458 adds w2, w2, #4 2459 b.eq .Lindexof_nomatch 2460 2461.Lindexof_loop1: 2462 ldrh w6, [x0, #2]! 2463 cmp w6, w1 2464 b.eq .Lmatch_3 2465 subs w2, w2, #1 2466 b.ne .Lindexof_loop1 2467 2468.Lindexof_nomatch: 2469 mov x0, #-1 2470 ret 2471 2472.Lmatch_0: 2473 sub x0, x0, #6 2474 sub x0, x0, x5 2475 asr x0, x0, #1 2476 ret 2477.Lmatch_1: 2478 sub x0, x0, #4 2479 sub x0, x0, x5 2480 asr x0, x0, #1 2481 ret 2482.Lmatch_2: 2483 sub x0, x0, #2 2484 sub x0, x0, x5 2485 asr x0, x0, #1 2486 ret 2487.Lmatch_3: 2488 sub x0, x0, x5 2489 asr x0, x0, #1 2490 ret 2491#if (STRING_COMPRESSION_FEATURE) 2492 /* 2493 * Comparing compressed string character-per-character with 2494 * input character 2495 */ 2496.Lstring_indexof_compressed: 2497 add x0, x0, x2 2498 sub x0, x0, #1 2499 sub w2, w3, w2 2500.Lstring_indexof_compressed_loop: 2501 subs w2, w2, #1 2502 b.lt .Lindexof_nomatch 2503 ldrb w6, [x0, #1]! 
2504 cmp w6, w1 2505 b.eq .Lstring_indexof_compressed_matched 2506 b .Lstring_indexof_compressed_loop 2507.Lstring_indexof_compressed_matched: 2508 sub x0, x0, x5 2509 ret 2510#endif 2511END art_quick_indexof 2512 2513 /* 2514 * Create a function `name` calling the ReadBarrier::Mark routine, 2515 * getting its argument and returning its result through W register 2516 * `wreg` (corresponding to X register `xreg`), saving and restoring 2517 * all caller-save registers. 2518 * 2519 * If `wreg` is different from `w0`, the generated function follows a 2520 * non-standard runtime calling convention: 2521 * - register `wreg` is used to pass the (sole) argument of this 2522 * function (instead of W0); 2523 * - register `wreg` is used to return the result of this function 2524 * (instead of W0); 2525 * - W0 is treated like a normal (non-argument) caller-save register; 2526 * - everything else is the same as in the standard runtime calling 2527 * convention (e.g. standard callee-save registers are preserved). 2528 */ 2529.macro READ_BARRIER_MARK_REG name, wreg, xreg 2530ENTRY \name 2531 // Reference is null, no work to do at all. 2532 cbz \wreg, .Lret_rb_\name 2533 /* 2534 * Allocate 46 stack slots * 8 = 368 bytes: 2535 * - 20 slots for core registers X0-X19 2536 * - 24 slots for floating-point registers D0-D7 and D16-D31 2537 * - 1 slot for return address register XLR 2538 * - 1 padding slot for 16-byte stack alignment 2539 */ 2540 // Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler. 2541 ldr wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET] 2542 tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_rb_\name 2543.Lret_rb_\name: 2544 ret 2545.Lslow_rb_\name: 2546 // Save all potentially live caller-save core registers. 2547 SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368 2548 SAVE_TWO_REGS x2, x3, 16 2549 SAVE_TWO_REGS x4, x5, 32 2550 SAVE_TWO_REGS x6, x7, 48 2551 SAVE_TWO_REGS x8, x9, 64 2552 SAVE_TWO_REGS x10, x11, 80 2553 SAVE_TWO_REGS x12, x13, 96 2554 SAVE_TWO_REGS x14, x15, 112 2555 SAVE_TWO_REGS x16, x17, 128 2556 SAVE_TWO_REGS x18, x19, 144 2557 // Save all potentially live caller-save floating-point registers. 2558 stp d0, d1, [sp, #160] 2559 stp d2, d3, [sp, #176] 2560 stp d4, d5, [sp, #192] 2561 stp d6, d7, [sp, #208] 2562 stp d16, d17, [sp, #224] 2563 stp d18, d19, [sp, #240] 2564 stp d20, d21, [sp, #256] 2565 stp d22, d23, [sp, #272] 2566 stp d24, d25, [sp, #288] 2567 stp d26, d27, [sp, #304] 2568 stp d28, d29, [sp, #320] 2569 stp d30, d31, [sp, #336] 2570 // Save return address. 2571 // (sp + #352 is a padding slot) 2572 SAVE_REG xLR, 360 2573 2574 .ifnc \wreg, w0 2575 mov w0, \wreg // Pass arg1 - obj from `wreg` 2576 .endif 2577 bl artReadBarrierMark // artReadBarrierMark(obj) 2578 .ifnc \wreg, w0 2579 mov \wreg, w0 // Return result into `wreg` 2580 .endif 2581 2582 // Restore core regs, except `xreg`, as `wreg` is used to return the 2583 // result of this function (simply remove it from the stack instead). 2584 POP_REGS_NE x0, x1, 0, \xreg 2585 POP_REGS_NE x2, x3, 16, \xreg 2586 POP_REGS_NE x4, x5, 32, \xreg 2587 POP_REGS_NE x6, x7, 48, \xreg 2588 POP_REGS_NE x8, x9, 64, \xreg 2589 POP_REGS_NE x10, x11, 80, \xreg 2590 POP_REGS_NE x12, x13, 96, \xreg 2591 POP_REGS_NE x14, x15, 112, \xreg 2592 POP_REGS_NE x16, x17, 128, \xreg 2593 POP_REGS_NE x18, x19, 144, \xreg 2594 // Restore floating-point registers. 
2595 ldp d0, d1, [sp, #160] 2596 ldp d2, d3, [sp, #176] 2597 ldp d4, d5, [sp, #192] 2598 ldp d6, d7, [sp, #208] 2599 ldp d16, d17, [sp, #224] 2600 ldp d18, d19, [sp, #240] 2601 ldp d20, d21, [sp, #256] 2602 ldp d22, d23, [sp, #272] 2603 ldp d24, d25, [sp, #288] 2604 ldp d26, d27, [sp, #304] 2605 ldp d28, d29, [sp, #320] 2606 ldp d30, d31, [sp, #336] 2607 // Restore return address and remove padding. 2608 RESTORE_REG xLR, 360 2609 DECREASE_FRAME 368 2610 ret 2611END \name 2612.endm 2613 2614READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg00, w0, x0 2615READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, w1, x1 2616READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, w2, x2 2617READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, w3, x3 2618READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, w4, x4 2619READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, w5, x5 2620READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, w6, x6 2621READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, w7, x7 2622READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, w8, x8 2623READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, w9, x9 2624READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, w10, x10 2625READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, w11, x11 2626READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, w12, x12 2627READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, w13, x13 2628READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, w14, x14 2629READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, w15, x15 2630// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, w16, x16 ip0 is blocked 2631READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, w17, x17 2632READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18 2633READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, w19, x19 2634READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, w20, x20 2635READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, w21, x21 2636READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, w22, x22 2637READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg23, w23, x23 2638READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg24, w24, x24 2639READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg25, w25, x25 2640READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg26, w26, x26 2641READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, w27, x27 2642READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, w28, x28 2643READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29 2644