call_arm64.cc revision 0f45f22eb3c52f0ece4c56989180e79c6680d825
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "entrypoints/quick/quick_entrypoints.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key, displacement>
 * pairs. For each set, we'll load them as a pair using ldp.
 * The test loop will look something like:
 *
 *   adr   r_base, <table>
 *   ldr   r_val, [rA64_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * loop:
 *   cbz   r_idx, quit
 *   ldp   r_key, r_disp, [r_base], #8
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   b.ne  loop
 *   adr   r_base, #0   ; This is the instruction from which we compute displacements
 *   add   r_base, r_disp
 *   br    r_base
 * quit:
 */
void Arm64Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                   RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage r_base = AllocTempWide();
  // Allocate key and disp temps.
  RegStorage r_key = AllocTemp();
  RegStorage r_disp = AllocTemp();
  // Materialize a pointer to the switch table
  NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, WrapPointer(tab_rec));
  // Set up r_idx
  RegStorage r_idx = AllocTemp();
  LoadConstant(r_idx, size);

  // Entry of loop.
  LIR* loop_entry = NewLIR0(kPseudoTargetLabel);
  LIR* branch_out = NewLIR2(kA64Cbz2rt, r_idx.GetReg(), 0);

  // Load next key/disp.
  NewLIR4(kA64LdpPost4rrXD, r_key.GetReg(), r_disp.GetReg(), r_base.GetReg(), 2);
  OpRegRegImm(kOpSub, r_idx, r_idx, 1);

  // Go to next case, if key does not match.
  OpRegReg(kOpCmp, r_key, rl_src.reg);
  OpCondBranch(kCondNe, loop_entry);

  // Key does match: branch to case label.
  LIR* switch_label = NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, -1);
  tab_rec->anchor = switch_label;

  // Add displacement to base branch address and go!
  OpRegRegRegExtend(kOpAdd, r_base, r_base, As64BitReg(r_disp), kA64Sxtw, 0U);
  NewLIR1(kA64Br1x, r_base.GetReg());

  // Loop exit label.
  LIR* loop_exit = NewLIR0(kPseudoTargetLabel);
  branch_out->target = loop_exit;
}
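/*
 * For illustration (hypothetical keys, not taken from any particular method): a sparse-switch
 * payload with keys {1, 10, 100} gives table_size = 3, so the loop sketched above runs at most
 * three ldp/cmp iterations. Each <key, displacement> pair is two 32-bit words, which is why the
 * post-indexed ldp advances r_base by 8 bytes per iteration.
 */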
void Arm64Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                   RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage table_base = AllocTempWide();
  // Materialize a pointer to the switch table
  NewLIR3(kA64Adr2xd, table_base.GetReg(), 0, WrapPointer(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage key_reg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    key_reg = rl_src.reg;
  } else {
    key_reg = AllocTemp();
    OpRegRegImm(kOpSub, key_reg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size, continue following the switch.
  // (After removing the bias, a single unsigned "higher than size - 1" compare catches both
  // key < low_key and key >= low_key + size.)
  OpRegImm(kOpCmp, key_reg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  RegStorage disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, As64BitReg(key_reg), disp_reg, 2, k32);

  // Get base branch address.
  RegStorage branch_reg = AllocTempWide();
  LIR* switch_label = NewLIR3(kA64Adr2xd, branch_reg.GetReg(), 0, -1);
  tab_rec->anchor = switch_label;

  // Add displacement to base branch address and go!
  OpRegRegRegExtend(kOpAdd, branch_reg, branch_reg, As64BitReg(disp_reg), kA64Sxtw, 0U);
  NewLIR1(kA64Br1x, branch_reg.GetReg());

  // branch_over target here
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300    magic value
 *  ushort width             width of each element in the table
 *  uint   size              number of elements in the table
 *  ubyte  data[size*width]  table of data values (may contain a single-byte
 *                           padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
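/*
 * For illustration (hypothetical payload, not taken from any particular method): five 16-bit
 * elements give width = 2 and size = 5, so the payload occupies 4 + (2 * 5 + 1) / 2 = 9 code
 * units, while tab_rec->size below is recorded in bytes as (5 * 2) + 8 = 18 (the data plus the
 * 8-byte ident/width/size header).
 */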
void Arm64Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData *tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  LoadValueDirectFixed(rl_src, rs_x0);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData).Int32Value(),
               rs_xLR);
  // Materialize a pointer to the fill data image
  NewLIR3(kA64Adr2xd, rx1, 0, WrapPointer(tab_rec));
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);
}

/*
 * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc.
 */
void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  // x0/w0 = object
  // w1 = thin lock thread id
  // x2 = address of lock word
  // w3 = lock word / store failure
  // TUNING: how much performance do we gain by inlining this, given that all
  // registers have already been flushed?
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_x0);  // = TargetReg(kArg0, kRef)
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
    null_check_branch = nullptr;  // No null check.
  } else {
    // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
    }
  }
  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
  OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
  NewLIR2(kA64Ldxr2rX, rw3, rx2);
  MarkPossibleNullPointerException(opt_flags);
  // w3 holds the loaded lock word; anything but zero means the object is not unlocked.
  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w3, 0, NULL);
  NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
  // w3 now holds the stxr status: zero on a successful store of the thread id.
  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);

  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
  not_unlocked_branch->target = slow_path_target;
  if (null_check_branch != nullptr) {
    null_check_branch->target = slow_path_target;
  }
  // TODO: move to a slow path.
  // Go expensive route - artLockObjectFromCode(obj);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(), rs_xLR);
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);

  LIR* success_target = NewLIR0(kPseudoTargetLabel);
  lock_success_branch->target = success_target;
  GenMemBarrier(kLoadAny);
}
/*
 * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc. Note the code below doesn't use ldxr/stxr as the code holds the lock
 * and can only give away ownership if it is suspended.
 */
void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  // x0/w0 = object
  // w1 = thin lock thread id
  // w2 = lock word
  // TUNING: how much performance do we gain by inlining this, given that all
  // registers have already been flushed?
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_x0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
    null_check_branch = nullptr;  // No null check.
  } else {
    // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
    }
  }
  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
  Load32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
  MarkPossibleNullPointerException(opt_flags);
  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w1, rs_w2, NULL);
  GenMemBarrier(kAnyStore);
  Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_wzr);
  LIR* unlock_success_branch = OpUnconditionalBranch(NULL);

  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
  slow_unlock_branch->target = slow_path_target;
  if (null_check_branch != nullptr) {
    null_check_branch->target = slow_path_target;
  }
  // TODO: move to a slow path.
  // Go expensive route - artUnlockObjectFromCode(obj);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(), rs_xLR);
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);

  LIR* success_target = NewLIR0(kPseudoTargetLabel);
  unlock_success_branch->target = success_target;
}

void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  LoadRefDisp(rs_xSELF, ex_offset, rl_result.reg, kNotVolatile);
  StoreRefDisp(rs_xSELF, ex_offset, rs_xzr, kNotVolatile);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
void Arm64Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  RegStorage reg_card_base = AllocTempWide();
  RegStorage reg_card_no = AllocTempWide();  // Needs to be wide as addr is ref=64b
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  LoadWordDisp(rs_xSELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  // TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
  StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base),
                   0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}
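/*
 * Note on MarkGCCard above: the byte stored into the card is the low byte of the card-table
 * base itself (hence As32BitReg(reg_card_base) as the source of the strb). The runtime biases
 * the card-table base so that this byte equals the dirty-card value, which lets the fast path
 * mark a card without materializing a separate constant.
 */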
void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, x0 to x7 are live. Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.
   * Reserve x8 & x9 for temporaries.
   */
  LockTemp(rs_x0);
  LockTemp(rs_x1);
  LockTemp(rs_x2);
  LockTemp(rs_x3);
  LockTemp(rs_x4);
  LockTemp(rs_x5);
  LockTemp(rs_x6);
  LockTemp(rs_x7);
  LockTemp(rs_x8);
  LockTemp(rs_x9);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm64);

  NewLIR0(kPseudoMethodEntry);

  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64) -
      Thread::kStackOverflowSignalReservedBytes;
  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
  const int spill_count = num_core_spills_ + num_fp_spills_;
  const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
  const int frame_size_without_spills = frame_size_ - spill_size;

  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      if (!large_frame) {
        // Load stack limit
        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9);
      }
    } else {
      // TODO(Arm64) Implement implicit checks.
      // Implicit stack overflow check.
      // Generate a load from [sp, #-framesize]. If this is in the stack
      // redzone we will get a segmentation fault.
      // Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
      // MarkPossibleStackOverflowException();
      LOG(FATAL) << "Implicit stack overflow checks not implemented.";
    }
  }

  if (frame_size_ > 0) {
    OpRegImm64(kOpSub, rs_sp, spill_size);
  }

  /* Need to spill any FP regs? */
  if (fp_spill_mask_) {
    int spill_offset = spill_size - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
    SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
  }

  /* Spill core callee saves. */
  if (core_spill_mask_) {
    int spill_offset = spill_size - kArm64PointerSize * num_core_spills_;
    SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
  }

  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      class StackOverflowSlowPath : public LIRSlowPath {
       public:
        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr),
            sp_displace_(sp_displace) {
        }
        void Compile() OVERRIDE {
          m2l_->ResetRegPool();
          m2l_->ResetDefTracking();
          GenerateTargetLabel(kPseudoThrowTarget);
          // Unwinds stack.
          m2l_->OpRegImm(kOpAdd, rs_sp, sp_displace_);
          m2l_->ClobberCallerSave();
          ThreadOffset<8> func_offset = QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow);
          m2l_->LockTemp(rs_x8);
          m2l_->LoadWordDisp(rs_xSELF, func_offset.Int32Value(), rs_x8);
          m2l_->NewLIR1(kA64Br1x, rs_x8.GetReg());
          m2l_->FreeTemp(rs_x8);
        }

       private:
        const size_t sp_displace_;
      };

      if (large_frame) {
        // Compare the expected SP against the bottom of the stack.
        // Branch to the throw target if there is not enough room.
        OpRegRegImm(kOpSub, rs_x9, rs_sp, frame_size_without_spills);
        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x8);
        LIR* branch = OpCmpBranch(kCondUlt, rs_x9, rs_x8, nullptr);
        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
        OpRegCopy(rs_sp, rs_x9);  // Establish stack after checks.
      } else {
        /*
         * If the frame is small enough, we are guaranteed to have enough remaining space to
         * handle signals on the user stack.
         * Establishes the stack before the checks.
         */
        OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
        LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_x9, nullptr);
        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
      }
    } else {
      OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
    }
  } else {
    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(rs_x0);
  FreeTemp(rs_x1);
  FreeTemp(rs_x2);
  FreeTemp(rs_x3);
  FreeTemp(rs_x4);
  FreeTemp(rs_x5);
  FreeTemp(rs_x6);
  FreeTemp(rs_x7);
  FreeTemp(rs_x8);
  FreeTemp(rs_x9);
}
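/*
 * A worked example of the spill-area arithmetic in GenEntrySequence above, using hypothetical
 * spill counts (five core callee-saves, no FP callee-saves) and the 8-byte ARM64 pointer size:
 * spill_size = (5 * 8 + 15) & ~0xf = 48, i.e. 40 bytes of spills padded up so that SP stays
 * 16-byte aligned, and frame_size_without_spills = frame_size_ - 48.
 */
static_assert(((5 * 8 + 15) & ~0xf) == 48, "illustrative spill-area rounding example");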
void Arm64Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, x0/x1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_x0);
  LockTemp(rs_x1);

  NewLIR0(kPseudoMethodExit);

  // Restore saves and drop stack frame.
  // 2 versions:
  //
  // 1. (Original): Try to address directly, then drop the whole frame.
  //    Limitation: ldp takes a 7-bit signed (scaled) immediate offset, so this only reaches
  //    small frames. There should have been a DCHECK!
  //
  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
  //    in range. Then drop the rest.
  //
  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
  //       in variant 1.

  if (frame_size_ <= 504) {
    // "Magic" constant, 63 (max signed 7b) * 8. Do variant 1.
    // Could be tighter, as the last load is below frame_size_ offset.
    if (fp_spill_mask_) {
      int spill_offset = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
      UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
    }
    if (core_spill_mask_) {
      int spill_offset = frame_size_ - kArm64PointerSize * num_core_spills_;
      UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
    }

    OpRegImm64(kOpAdd, rs_sp, frame_size_);
  } else {
    // Second variant. Drop the frame part.
    int drop = 0;
    // TODO: Always use the first formula, as num_fp_spills would be zero?
    if (fp_spill_mask_) {
      drop = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
    } else {
      drop = frame_size_ - kArm64PointerSize * num_core_spills_;
    }

    // Drop needs to be 16B aligned, so that SP stays aligned.
    drop = RoundDown(drop, 16);

    OpRegImm64(kOpAdd, rs_sp, drop);

    if (fp_spill_mask_) {
      int offset = frame_size_ - drop - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
      UnSpillFPRegs(rs_sp, offset, fp_spill_mask_);
    }
    if (core_spill_mask_) {
      int offset = frame_size_ - drop - kArm64PointerSize * num_core_spills_;
      UnSpillCoreRegs(rs_sp, offset, core_spill_mask_);
    }

    OpRegImm64(kOpAdd, rs_sp, frame_size_ - drop);
  }

  // Finally return.
  NewLIR0(kA64Ret);
}

void Arm64Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kA64Ret);
}

}  // namespace art