call_arm64.cc revision 83b1940e6482b9d8feba5c492507735686650ea5
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "entrypoints/quick/quick_entrypoints.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs. Each iteration of the test loop loads one pair with ldp.
 * The loop will look something like:
 *
 *   adr   r_base, <table>
 *   ldr   r_val, [rA64_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * loop:
 *   cbz   r_idx, quit
 *   ldp   r_key, r_disp, [r_base], #8
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   b.ne  loop
 *   adr   r_base, #0   ; This is the instruction from which we compute displacements
 *   add   r_base, r_disp
 *   br    r_base
 * quit:
 */
void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage r_base = AllocTempWide();
  // Allocate key and disp temps.
  RegStorage r_key = AllocTemp();
  RegStorage r_disp = AllocTemp();
  // Materialize a pointer to the switch table
  NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, WrapPointer(tab_rec));
  // Set up r_idx
  RegStorage r_idx = AllocTemp();
  LoadConstant(r_idx, size);

  // Entry of loop.
  LIR* loop_entry = NewLIR0(kPseudoTargetLabel);
  LIR* branch_out = NewLIR2(kA64Cbz2rt, r_idx.GetReg(), 0);

  // Load next key/disp.
  NewLIR4(kA64LdpPost4rrXD, r_key.GetReg(), r_disp.GetReg(), r_base.GetReg(), 2);
  OpRegRegImm(kOpSub, r_idx, r_idx, 1);

  // Go to next case, if key does not match.
  OpRegReg(kOpCmp, r_key, rl_src.reg);
  OpCondBranch(kCondNe, loop_entry);

  // Key does match: branch to case label.
  LIR* switch_label = NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, -1);
  tab_rec->anchor = switch_label;

  // Add displacement to base branch address and go!
  OpRegRegRegExtend(kOpAdd, r_base, r_base, As64BitReg(r_disp), kA64Sxtw, 0U);
  NewLIR1(kA64Br1x, r_base.GetReg());

  // Loop exit label.
  LIR* loop_exit = NewLIR0(kPseudoTargetLabel);
  branch_out->target = loop_exit;
}
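
/*
 * The packed variant below replaces the key-search loop with a bounds check
 * plus an indexed load of the 32-bit displacement. As a hedged sketch (register
 * names and labels here are illustrative, not the allocator's actual choices),
 * the emitted code looks roughly like:
 *
 *   adr   table_base, <table>
 *   sub   key, r_val, #low_key        ; only emitted when low_key != 0
 *   cmp   key, #(size - 1)
 *   b.hi  done                        ; unsigned compare also catches key < 0
 *   ldr   w_disp, [table_base, x_key, lsl #2]
 *   adr   branch_base, #0             ; anchor from which displacements are computed
 *   add   branch_base, branch_base, w_disp, sxtw
 *   br    branch_base
 * done:
 */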

void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage table_base = AllocTempWide();
  // Materialize a pointer to the switch table
  NewLIR3(kA64Adr2xd, table_base.GetReg(), 0, WrapPointer(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage key_reg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    key_reg = rl_src.reg;
  } else {
    key_reg = AllocTemp();
    OpRegRegImm(kOpSub, key_reg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size, continue with the code following the switch.
  OpRegImm(kOpCmp, key_reg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  RegStorage disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, As64BitReg(key_reg), disp_reg, 2, k32);

  // Get base branch address.
  RegStorage branch_reg = AllocTempWide();
  LIR* switch_label = NewLIR3(kA64Adr2xd, branch_reg.GetReg(), 0, -1);
  tab_rec->anchor = switch_label;

  // Add displacement to base branch address and go!
  OpRegRegRegExtend(kOpAdd, branch_reg, branch_reg, As64BitReg(disp_reg), kA64Sxtw, 0U);
  NewLIR1(kA64Br1x, branch_reg.GetReg());

  // branch_over target here
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
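
/*
 * Worked example of the size formula above (values are illustrative, not from
 * the original source): filling an int[10] gives width = 4 and size = 10, so
 * the table occupies 4 + (4 * 10 + 1) / 2 = 24 16-bit code units - four units
 * of header plus 40 data bytes. The "+ 1" only matters when size * width is
 * odd and a padding byte is appended.
 */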

void Arm64Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData *tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  LoadValueDirectFixed(rl_src, rs_x0);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData).Int32Value(),
               rs_xLR);
  // Materialize a pointer to the fill data image
  NewLIR3(kA64Adr2xd, rx1, 0, WrapPointer(tab_rec));
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);
}

/*
 * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc.
 */
void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  // x0/w0 = object
  // w1 = thin lock thread id
  // x2 = address of lock word
  // w3 = lock word / store failure
  // TUNING: how much performance do we gain by inlining this, given that we have
  // already flushed all registers?
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_x0);  // = TargetReg(kArg0, kRef)
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
    null_check_branch = nullptr;  // No null check.
  } else {
    // If the null-check fails, it is handled by the slow path to reduce exception-related
    // meta-data.
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
    }
  }
  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
  OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
  NewLIR2(kA64Ldxr2rX, rw3, rx2);
  MarkPossibleNullPointerException(opt_flags);
  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w3, 0, NULL);
  NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);

  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
  not_unlocked_branch->target = slow_path_target;
  if (null_check_branch != nullptr) {
    null_check_branch->target = slow_path_target;
  }
  // TODO: move to a slow path.
  // Go expensive route - artLockObjectFromCode(obj);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(), rs_xLR);
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);

  LIR* success_target = NewLIR0(kPseudoTargetLabel);
  lock_success_branch->target = success_target;
  GenMemBarrier(kLoadAny);
}
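
/*
 * For reference, a rough sketch of the fast path generated above (offsets are
 * symbolic, the null check is elided, and register roles follow the comments
 * at the top of the function):
 *
 *   ldr   w1, [xSELF, #thin_lock_id_offset]
 *   add   x2, x0, #monitor_offset
 *   ldxr  w3, [x2]
 *   cmp   w3, #0
 *   b.ne  slow_path                   ; lock word != 0 => not unlocked
 *   stxr  w3, w1, [x2]
 *   cmp   w3, #0
 *   b.eq  success                     ; a failed store-exclusive falls through
 * slow_path:
 *   ...call pLockObject...
 * success:
 *   ...                               ; kLoadAny acquire barrier
 */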

/*
 * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc. Note the code below doesn't use ldxr/stxr as the code holds the lock
 * and can only give away ownership if it is suspended.
 */
void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  // x0/w0 = object
  // w1 = thin lock thread id
  // w2 = lock word
  // TUNING: how much performance do we gain by inlining this, given that we have
  // already flushed all registers?
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_x0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
    null_check_branch = nullptr;  // No null check.
  } else {
    // If the null-check fails, it is handled by the slow path to reduce exception-related
    // meta-data.
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
    }
  }
  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
  Load32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
  MarkPossibleNullPointerException(opt_flags);
  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w1, rs_w2, NULL);
  GenMemBarrier(kAnyStore);
  Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_wzr);
  LIR* unlock_success_branch = OpUnconditionalBranch(NULL);

  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
  slow_unlock_branch->target = slow_path_target;
  if (null_check_branch != nullptr) {
    null_check_branch->target = slow_path_target;
  }
  // TODO: move to a slow path.
  // Go expensive route - artUnlockObjectFromCode(obj);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(), rs_xLR);
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);

  LIR* success_target = NewLIR0(kPseudoTargetLabel);
  unlock_success_branch->target = success_target;
}

void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  LoadRefDisp(rs_xSELF, ex_offset, rl_result.reg, kNotVolatile);
  StoreRefDisp(rs_xSELF, ex_offset, rs_xzr, kNotVolatile);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
void Arm64Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  RegStorage reg_card_base = AllocTempWide();
  RegStorage reg_card_no = AllocTempWide();  // Needs to be wide as addr is ref=64b
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  LoadWordDisp(rs_xSELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  // TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
  StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base),
                   0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}
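
/*
 * The card-marking sequence above roughly expands to (a hedged sketch; the
 * "wVal"/"xBase"/"xNo" names are illustrative placeholders, offsets symbolic):
 *
 *   cmp   wVal, #0
 *   b.eq  done                        ; null stores don't dirty a card
 *   ldr   xBase, [xSELF, #card_table_offset]
 *   lsr   xNo, xAddr, #kCardShift
 *   strb  wBase, [xBase, xNo]         ; the table base's low byte is the dirty value
 * done:
 */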

void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, x0 to x7 are live. Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.
   * Reserve x8 & x9 for temporaries.
   */
  LockTemp(rs_x0);
  LockTemp(rs_x1);
  LockTemp(rs_x2);
  LockTemp(rs_x3);
  LockTemp(rs_x4);
  LockTemp(rs_x5);
  LockTemp(rs_x6);
  LockTemp(rs_x7);
  LockTemp(rs_xIP0);
  LockTemp(rs_xIP1);

  /* TUNING:
   * Use AllocTemp() and reuse LR if possible to give us the freedom to adjust the number
   * of temp registers.
   */

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm64);

  NewLIR0(kPseudoMethodEntry);

  const int spill_count = num_core_spills_ + num_fp_spills_;
  const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
  const int frame_size_without_spills = frame_size_ - spill_size;

  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      // Load stack limit
      LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
    } else {
      // Implicit stack overflow check.
      // Generate a load from [sp, #-framesize]. If this is in the stack
      // redzone we will get a segmentation fault.

      // TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
      // so that we can avoid the following "sub sp" when spilling?
      OpRegRegImm(kOpSub, rs_x8, rs_sp, GetStackOverflowReservedBytes(kArm64));
      LoadWordDisp(rs_x8, 0, rs_x8);
      MarkPossibleStackOverflowException();
    }
  }

  int spilled_already = 0;
  if (spill_size > 0) {
    spilled_already = SpillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
    DCHECK(spill_size == spilled_already || frame_size_ == spilled_already);
  }

  if (spilled_already != frame_size_) {
    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
  }

  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      class StackOverflowSlowPath: public LIRSlowPath {
       public:
        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr),
            sp_displace_(sp_displace) {
        }
        void Compile() OVERRIDE {
          m2l_->ResetRegPool();
          m2l_->ResetDefTracking();
          GenerateTargetLabel(kPseudoThrowTarget);
          // Unwinds stack.
          m2l_->OpRegImm(kOpAdd, rs_sp, sp_displace_);
          m2l_->ClobberCallerSave();
          ThreadOffset<8> func_offset = QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow);
          m2l_->LockTemp(rs_xIP0);
          m2l_->LoadWordDisp(rs_xSELF, func_offset.Int32Value(), rs_xIP0);
          m2l_->NewLIR1(kA64Br1x, rs_xIP0.GetReg());
          m2l_->FreeTemp(rs_xIP0);
        }

       private:
        const size_t sp_displace_;
      };

      LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
      AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
    }
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(rs_x0);
  FreeTemp(rs_x1);
  FreeTemp(rs_x2);
  FreeTemp(rs_x3);
  FreeTemp(rs_x4);
  FreeTemp(rs_x5);
  FreeTemp(rs_x6);
  FreeTemp(rs_x7);
  FreeTemp(rs_xIP0);
  FreeTemp(rs_xIP1);
}
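
/*
 * A hedged sketch of a typical non-leaf prologue with the explicit overflow
 * check (the exact spill code depends on SpillRegs and the spill masks;
 * offsets are symbolic):
 *
 *   ldr  xIP1, [xSELF, #stack_end_offset]  ; load stack limit
 *   ...                                    ; SpillRegs: save callee-saves, 16-byte aligned
 *   sub  sp, sp, #frame_size_without_spills
 *   cmp  sp, xIP1
 *   b.lo throw_target                      ; slow path rewinds sp by frame_size_ and
 *                                          ; branches to pThrowStackOverflow via xIP0
 */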

void Arm64Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, x0/x1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_x0);
  LockTemp(rs_x1);

  NewLIR0(kPseudoMethodExit);

  UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);

  // Finally return.
  NewLIR0(kA64Ret);
}

void Arm64Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kA64Ret);
}

}  // namespace art